repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/test_panelnd.py | 2 | 3445 | # -*- coding: utf-8 -*-
import nose
from pandas.core import panelnd
from pandas.core.panel import Panel
from pandas.util.testing import assert_panel_equal
import pandas.util.testing as tm
class TestPanelnd(tm.TestCase):
def setUp(self):
pass
def test_4d_construction(self):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) # noqa
def test_4d_construction_alt(self):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer='Panel',
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) # noqa
def test_4d_construction_error(self):
# create a 4D
self.assertRaises(Exception,
panelnd.create_nd_panel_factory,
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis',
'minor_axis'],
slices={'items': 'items',
'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer='foo',
aliases={'major': 'major_axis',
'minor': 'minor_axis'},
stat_axis=2)
def test_5d_construction(self):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels1', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))
# create a 5D
Panel5D = panelnd.create_nd_panel_factory(
klass_name='Panel5D',
orders=['cool1', 'labels1', 'items', 'major_axis',
'minor_axis'],
slices={'labels1': 'labels1', 'items': 'items',
'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel4D,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p5d = Panel5D(dict(C1=p4d))
# slice back to 4d
results = p5d.ix['C1', :, :, 0:3, :]
expected = p4d.ix[:, :, 0:3, :]
assert_panel_equal(results['L1'], expected['L1'])
# test a transpose
# results = p5d.transpose(1,2,3,4,0)
# expected =
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
harikishen/addons-server | src/olympia/amo/tasks.py | 1 | 2584 | import datetime
from django.core.mail import EmailMessage, EmailMultiAlternatives
import olympia.core.logger
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.amo.celery import task
from olympia.amo.utils import get_email_backend
from olympia.bandwagon.models import Collection
from olympia.stats.models import Contribution
log = olympia.core.logger.getLogger('z.task')
@task
def send_email(recipient, subject, message, from_email=None,
html_message=None, attachments=None, real_email=False,
cc=None, headers=None, fail_silently=False, async=False,
max_retries=None, reply_to=None, **kwargs):
backend = EmailMultiAlternatives if html_message else EmailMessage
connection = get_email_backend(real_email)
result = backend(subject, message, from_email, to=recipient, cc=cc,
connection=connection, headers=headers,
attachments=attachments, reply_to=reply_to)
if html_message:
result.attach_alternative(html_message, 'text/html')
try:
result.send(fail_silently=False)
return True
except Exception as e:
log.error('send_mail failed with error: %s' % e)
if async:
return send_email.retry(exc=e, max_retries=max_retries)
elif not fail_silently:
raise
else:
return False
@task
def set_modified_on_object(obj, **kw):
"""Sets modified on one object at a time."""
try:
log.info('Setting modified on object: %s, %s' %
(obj.__class__.__name__, obj.pk))
obj.update(modified=datetime.datetime.now())
except Exception, e:
log.error('Failed to set modified on: %s, %s - %s' %
(obj.__class__.__name__, obj.pk, e))
@task
def delete_logs(items, **kw):
log.info('[%s@%s] Deleting logs' % (len(items), delete_logs.rate_limit))
ActivityLog.objects.filter(pk__in=items).exclude(
action__in=amo.LOG_KEEP).delete()
@task
def delete_stale_contributions(items, **kw):
log.info('[%s@%s] Deleting stale contributions' %
(len(items), delete_stale_contributions.rate_limit))
Contribution.objects.filter(
transaction_id__isnull=True, pk__in=items).delete()
@task
def delete_anonymous_collections(items, **kw):
log.info('[%s@%s] Deleting anonymous collections' %
(len(items), delete_anonymous_collections.rate_limit))
Collection.objects.filter(type=amo.COLLECTION_ANONYMOUS,
pk__in=items).delete()
| bsd-3-clause |
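The `send_email` function above is registered as a Celery task, so callers queue it rather than calling it directly. The following is a minimal, hypothetical usage sketch (not part of the original file): the recipient address, subject, and body are made up, and only keyword arguments visible in the signature above are used.

```python
# Hypothetical usage sketch for the send_email task defined above.
from olympia.amo.tasks import send_email

# .delay() queues the task through Celery; `recipient` is passed to the email
# backend as the to= list, so a list of addresses is expected.
send_email.delay(
    ['someone@example.com'],            # hypothetical recipient
    'Weekly add-on report',             # hypothetical subject
    'Plain-text body goes here.',
    html_message='<p>HTML body goes here.</p>',
    real_email=True,
)
```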
sctjkc01/ofCourse | ofcourse/participants.py | 1 | 3800 | import os
from datetime import datetime, date, timedelta
from urlparse import urlparse
import yaml
from flask import Blueprint, redirect
from flask.ext.mako import render_template
import ofcourse
from ofcourse.util import app_path, get_hw_keys
participants_bp = Blueprint('participants_bp',
__name__,
template_folder=app_path('templates'))
currentYear = str(date.today().year)
currentTerm = "fall" if date.today().month > 7 else "spring"
@participants_bp.route('/')
def participants_blank():
"""
This is the default landing
for the participants listing page.
It will list all of the participants
in the current term for HFOSS
"""
return participants_year_term(currentYear, currentTerm)
@participants_bp.route('/<year_or_nick>')
def participants_year(year_or_nick):
"""
This will get all the participants
within a given year
"""
p_url = find_participant(year_or_nick)
if p_url is not None:
# render individual page
return redirect(p_url)
# otherwise render as a year
return participants(year_or_nick + '/')
@participants_bp.route('/<year>/<term>')
def participants_year_term(year, term):
"""
This will get all the participants
within a given year and term
"""
return participants(year + '/' + term + '/')
@participants_bp.route('/all')
def participants_all():
"""
This will get all the participants
who have taken HFOSS
"""
return participants('')
def participants(root_dir):
"""
Render the participants page,
which shows a directory of all
the students with their forge
links, blog posts, assignment
links, and etc.
"""
yaml_dir = app_path('people', root_dir)
student_data = []
for dirpath, dirnames, files in os.walk(yaml_dir):
dirpath = dirpath.rstrip("/")
for fname in sorted(files):
if fname.endswith('.yaml'):
with open(dirpath + '/' + fname) as students:
contents = yaml.safe_load(students)
contents['yaml'] = dirpath + '/' + fname
year_term_data = dirpath.split('/')
contents['participant_page'] = "{y}/{t}/{u}".format(
y=year_term_data[-2],
t=year_term_data[-1],
u=os.path.splitext(fname)[0]
)
for forge in contents['forges']:
url = urlparse(forge)
if "github.com" in url.netloc:
contents['github'] = url.path[1:]
contents['isActive'] = (currentYear in year_term_data and
currentTerm in year_term_data)
student_data.append(contents)
assignments = get_hw_keys()
elapsed = (datetime.today() - ofcourse.site.COURSE_START).total_seconds()
target_number = int(elapsed / timedelta(weeks=1).total_seconds() + 1 +
len(assignments))
return render_template(
'blogs.mak', name='mako',
student_data=student_data,
gravatar=ofcourse.site.gravatar,
target_number=target_number,
hw_keys=assignments
)
def find_participant(nick):
yaml_dir = app_path('people')
for dirpath, dirnames, files in os.walk(yaml_dir):
for fname in files:
if (fname.lower().startswith(nick.lower()) and
fname.endswith('.yaml')):
participant = os.path.join(
dirpath,
fname
).replace(yaml_dir, '')
participant = participant.replace('.yaml', '')
return 'participants' + participant
| apache-2.0 |
doduytrung/odoo-8.0 | addons/account/wizard/account_validate_account_move.py | 381 | 3203 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class validate_account_move(osv.osv_memory):
_name = "validate.account.move"
_description = "Validate Account Move"
_columns = {
'journal_ids': fields.many2many('account.journal', 'wizard_validate_account_move_journal', 'wizard_id', 'journal_id', 'Journal', required=True),
'period_ids': fields.many2many('account.period', 'wizard_validate_account_move_period', 'wizard_id', 'period_id', 'Period', required=True, domain=[('state','<>','done')]),
}
def validate_move(self, cr, uid, ids, context=None):
obj_move = self.pool.get('account.move')
if context is None:
context = {}
data = self.read(cr, uid, ids[0], context=context)
ids_move = obj_move.search(cr, uid, [('state','=','draft'),('journal_id','in',tuple(data['journal_ids'])),('period_id','in',tuple(data['period_ids']))], order='date')
if not ids_move:
raise osv.except_osv(_('Warning!'), _('Specified journals do not have any account move entries in draft state for the specified periods.'))
obj_move.button_validate(cr, uid, ids_move, context=context)
return {'type': 'ir.actions.act_window_close'}
class validate_account_move_lines(osv.osv_memory):
_name = "validate.account.move.lines"
_description = "Validate Account Move Lines"
def validate_move_lines(self, cr, uid, ids, context=None):
obj_move_line = self.pool.get('account.move.line')
obj_move = self.pool.get('account.move')
move_ids = []
if context is None:
context = {}
data_line = obj_move_line.browse(cr, uid, context['active_ids'], context)
for line in data_line:
if line.move_id.state=='draft':
move_ids.append(line.move_id.id)
move_ids = list(set(move_ids))
if not move_ids:
raise osv.except_osv(_('Warning!'), _('Selected Entry Lines do not have any account move entries in draft state.'))
obj_move.button_validate(cr, uid, move_ids, context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
akarki15/mozillians | vendor-local/lib/python/unidecode/x07e.py | 252 | 4682 | data = (
'Xia ', # 0x00
'Yuan ', # 0x01
'Zong ', # 0x02
'Xu ', # 0x03
'Nawa ', # 0x04
'Odoshi ', # 0x05
'Geng ', # 0x06
'Sen ', # 0x07
'Ying ', # 0x08
'Jin ', # 0x09
'Yi ', # 0x0a
'Zhui ', # 0x0b
'Ni ', # 0x0c
'Bang ', # 0x0d
'Gu ', # 0x0e
'Pan ', # 0x0f
'Zhou ', # 0x10
'Jian ', # 0x11
'Cuo ', # 0x12
'Quan ', # 0x13
'Shuang ', # 0x14
'Yun ', # 0x15
'Xia ', # 0x16
'Shuai ', # 0x17
'Xi ', # 0x18
'Rong ', # 0x19
'Tao ', # 0x1a
'Fu ', # 0x1b
'Yun ', # 0x1c
'Zhen ', # 0x1d
'Gao ', # 0x1e
'Ru ', # 0x1f
'Hu ', # 0x20
'Zai ', # 0x21
'Teng ', # 0x22
'Xian ', # 0x23
'Su ', # 0x24
'Zhen ', # 0x25
'Zong ', # 0x26
'Tao ', # 0x27
'Horo ', # 0x28
'Cai ', # 0x29
'Bi ', # 0x2a
'Feng ', # 0x2b
'Cu ', # 0x2c
'Li ', # 0x2d
'Suo ', # 0x2e
'Yin ', # 0x2f
'Xi ', # 0x30
'Zong ', # 0x31
'Lei ', # 0x32
'Zhuan ', # 0x33
'Qian ', # 0x34
'Man ', # 0x35
'Zhi ', # 0x36
'Lu ', # 0x37
'Mo ', # 0x38
'Piao ', # 0x39
'Lian ', # 0x3a
'Mi ', # 0x3b
'Xuan ', # 0x3c
'Zong ', # 0x3d
'Ji ', # 0x3e
'Shan ', # 0x3f
'Sui ', # 0x40
'Fan ', # 0x41
'Shuai ', # 0x42
'Beng ', # 0x43
'Yi ', # 0x44
'Sao ', # 0x45
'Mou ', # 0x46
'Zhou ', # 0x47
'Qiang ', # 0x48
'Hun ', # 0x49
'Sem ', # 0x4a
'Xi ', # 0x4b
'Jung ', # 0x4c
'Xiu ', # 0x4d
'Ran ', # 0x4e
'Xuan ', # 0x4f
'Hui ', # 0x50
'Qiao ', # 0x51
'Zeng ', # 0x52
'Zuo ', # 0x53
'Zhi ', # 0x54
'Shan ', # 0x55
'San ', # 0x56
'Lin ', # 0x57
'Yu ', # 0x58
'Fan ', # 0x59
'Liao ', # 0x5a
'Chuo ', # 0x5b
'Zun ', # 0x5c
'Jian ', # 0x5d
'Rao ', # 0x5e
'Chan ', # 0x5f
'Rui ', # 0x60
'Xiu ', # 0x61
'Hui ', # 0x62
'Hua ', # 0x63
'Zuan ', # 0x64
'Xi ', # 0x65
'Qiang ', # 0x66
'Un ', # 0x67
'Da ', # 0x68
'Sheng ', # 0x69
'Hui ', # 0x6a
'Xi ', # 0x6b
'Se ', # 0x6c
'Jian ', # 0x6d
'Jiang ', # 0x6e
'Huan ', # 0x6f
'Zao ', # 0x70
'Cong ', # 0x71
'Jie ', # 0x72
'Jiao ', # 0x73
'Bo ', # 0x74
'Chan ', # 0x75
'Yi ', # 0x76
'Nao ', # 0x77
'Sui ', # 0x78
'Yi ', # 0x79
'Shai ', # 0x7a
'Xu ', # 0x7b
'Ji ', # 0x7c
'Bin ', # 0x7d
'Qian ', # 0x7e
'Lan ', # 0x7f
'Pu ', # 0x80
'Xun ', # 0x81
'Zuan ', # 0x82
'Qi ', # 0x83
'Peng ', # 0x84
'Li ', # 0x85
'Mo ', # 0x86
'Lei ', # 0x87
'Xie ', # 0x88
'Zuan ', # 0x89
'Kuang ', # 0x8a
'You ', # 0x8b
'Xu ', # 0x8c
'Lei ', # 0x8d
'Xian ', # 0x8e
'Chan ', # 0x8f
'Kou ', # 0x90
'Lu ', # 0x91
'Chan ', # 0x92
'Ying ', # 0x93
'Cai ', # 0x94
'Xiang ', # 0x95
'Xian ', # 0x96
'Zui ', # 0x97
'Zuan ', # 0x98
'Luo ', # 0x99
'Xi ', # 0x9a
'Dao ', # 0x9b
'Lan ', # 0x9c
'Lei ', # 0x9d
'Lian ', # 0x9e
'Si ', # 0x9f
'Jiu ', # 0xa0
'Yu ', # 0xa1
'Hong ', # 0xa2
'Zhou ', # 0xa3
'Xian ', # 0xa4
'He ', # 0xa5
'Yue ', # 0xa6
'Ji ', # 0xa7
'Wan ', # 0xa8
'Kuang ', # 0xa9
'Ji ', # 0xaa
'Ren ', # 0xab
'Wei ', # 0xac
'Yun ', # 0xad
'Hong ', # 0xae
'Chun ', # 0xaf
'Pi ', # 0xb0
'Sha ', # 0xb1
'Gang ', # 0xb2
'Na ', # 0xb3
'Ren ', # 0xb4
'Zong ', # 0xb5
'Lun ', # 0xb6
'Fen ', # 0xb7
'Zhi ', # 0xb8
'Wen ', # 0xb9
'Fang ', # 0xba
'Zhu ', # 0xbb
'Yin ', # 0xbc
'Niu ', # 0xbd
'Shu ', # 0xbe
'Xian ', # 0xbf
'Gan ', # 0xc0
'Xie ', # 0xc1
'Fu ', # 0xc2
'Lian ', # 0xc3
'Zu ', # 0xc4
'Shen ', # 0xc5
'Xi ', # 0xc6
'Zhi ', # 0xc7
'Zhong ', # 0xc8
'Zhou ', # 0xc9
'Ban ', # 0xca
'Fu ', # 0xcb
'Zhuo ', # 0xcc
'Shao ', # 0xcd
'Yi ', # 0xce
'Jing ', # 0xcf
'Dai ', # 0xd0
'Bang ', # 0xd1
'Rong ', # 0xd2
'Jie ', # 0xd3
'Ku ', # 0xd4
'Rao ', # 0xd5
'Die ', # 0xd6
'Heng ', # 0xd7
'Hui ', # 0xd8
'Gei ', # 0xd9
'Xuan ', # 0xda
'Jiang ', # 0xdb
'Luo ', # 0xdc
'Jue ', # 0xdd
'Jiao ', # 0xde
'Tong ', # 0xdf
'Geng ', # 0xe0
'Xiao ', # 0xe1
'Juan ', # 0xe2
'Xiu ', # 0xe3
'Xi ', # 0xe4
'Sui ', # 0xe5
'Tao ', # 0xe6
'Ji ', # 0xe7
'Ti ', # 0xe8
'Ji ', # 0xe9
'Xu ', # 0xea
'Ling ', # 0xeb
'[?] ', # 0xec
'Xu ', # 0xed
'Qi ', # 0xee
'Fei ', # 0xef
'Chuo ', # 0xf0
'Zhang ', # 0xf1
'Gun ', # 0xf2
'Sheng ', # 0xf3
'Wei ', # 0xf4
'Mian ', # 0xf5
'Shou ', # 0xf6
'Beng ', # 0xf7
'Chou ', # 0xf8
'Tao ', # 0xf9
'Liu ', # 0xfa
'Quan ', # 0xfb
'Zong ', # 0xfc
'Zhan ', # 0xfd
'Wan ', # 0xfe
'Lu ', # 0xff
)
| bsd-3-clause |
MartinSavc/scikit-learn | sklearn/neighbors/classification.py | 132 | 14388 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
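The docstrings above describe three weighting schemes for the `weights` parameter ('uniform', 'distance', or a callable), but the built-in doctest only exercises the default. Below is a small illustrative sketch, not part of the original file; the toy data and the `inverse_square` helper are made up for demonstration.

```python
# Illustrative sketch of the weighting options documented above (toy data).
from sklearn.neighbors import KNeighborsClassifier

X = [[0], [1], [2], [3]]
y = [0, 0, 1, 1]

# 'distance' weighting lets closer neighbors dominate the vote.
clf = KNeighborsClassifier(n_neighbors=3, weights='distance')
clf.fit(X, y)
print(clf.predict_proba([[1.1]]))

# A callable must accept an array of distances and return an array of the
# same shape containing the weights, as the docstring states.
def inverse_square(distances):
    return 1.0 / (distances ** 2 + 1e-9)

clf = KNeighborsClassifier(n_neighbors=3, weights=inverse_square)
clf.fit(X, y)
print(clf.predict([[1.1]]))
```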
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/drawing/fill.py | 10 | 10642 | from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Bool,
Integer,
Set,
NoneSet,
Typed,
MinMax,
Sequence,
)
from openpyxl.descriptors.excel import Relation
from openpyxl.descriptors.nested import (
NestedNoneSet,
NestedSequence,
)
from openpyxl.xml.constants import DRAWING_NS
from .colors import ColorChoice
from openpyxl.descriptors.excel import ExtensionList as OfficeArtExtensionList
from .effect import *
"""
Fill elements from drawing main schema
"""
class PatternFillProperties(Serialisable):
tagname = "pattFill"
prst = NoneSet(values=(['pct5', 'pct10', 'pct20', 'pct25', 'pct30', 'pct40',
'pct50', 'pct60', 'pct70', 'pct75', 'pct80', 'pct90', 'horz', 'vert',
'ltHorz', 'ltVert', 'dkHorz', 'dkVert', 'narHorz', 'narVert', 'dashHorz',
'dashVert', 'cross', 'dnDiag', 'upDiag', 'ltDnDiag', 'ltUpDiag',
'dkDnDiag', 'dkUpDiag', 'wdDnDiag', 'wdUpDiag', 'dashDnDiag',
'dashUpDiag', 'diagCross', 'smCheck', 'lgCheck', 'smGrid', 'lgGrid',
'dotGrid', 'smConfetti', 'lgConfetti', 'horzBrick', 'diagBrick',
'solidDmnd', 'openDmnd', 'dotDmnd', 'plaid', 'sphere', 'weave', 'divot',
'shingle', 'wave', 'trellis', 'zigZag']))
preset = Alias("prst")
fgClr = Typed(expected_type=ColorChoice, allow_none=True)
foreground = Alias("fgClr")
bgClr = Typed(expected_type=ColorChoice, allow_none=True)
background = Alias("bgClr")
__elements__ = ("fgClr", "bgClr")
def __init__(self,
prst=None,
fgClr=None,
bgClr=None,
):
self.prst = prst
self.fgClr = fgClr
self.bgClr = bgClr
class RelativeRect(Serialisable):
tagname = "rect"
namespace = DRAWING_NS
l = MinMax(min=0, max=100, allow_none=True)
left = Alias('l')
t = MinMax(min=0, max=100, allow_none=True)
top = Alias('t')
r = MinMax(min=0, max=100, allow_none=True)
right = Alias('r')
b = MinMax(min=0, max=100, allow_none=True)
bottom = Alias('b')
def __init__(self,
l=None,
t=None,
r=None,
b=None,
):
self.l = l
self.t = t
self.r = r
self.b = b
class StretchInfoProperties(Serialisable):
tagname = "stretch"
namespace = DRAWING_NS
fillRect = Typed(expected_type=RelativeRect, allow_none=True)
def __init__(self,
fillRect=None,
):
self.fillRect = fillRect
class GradientStop(Serialisable):
tagname = "gradStop"
pos = MinMax(min=0, max=100, allow_none=True)
# Color Choice Group
def __init__(self,
pos=None,
):
self.pos = pos
class GradientStopList(Serialisable):
tagname = "gradStopLst"
gs = Sequence(expected_type=GradientStop)
def __init__(self,
gs=None,
):
if gs is None:
gs = [GradientStop(), GradientStop()]
self.gs = gs
class LinearShadeProperties(Serialisable):
ang = Integer()
scaled = Bool(allow_none=True)
def __init__(self,
ang=None,
scaled=None,
):
self.ang = ang
self.scaled = scaled
class PathShadeProperties(Serialisable):
path = Set(values=(['shape', 'circle', 'rect']))
fillToRect = Typed(expected_type=RelativeRect, allow_none=True)
def __init__(self,
path=None,
fillToRect=None,
):
self.path = path
self.fillToRect = fillToRect
class GradientFillProperties(Serialisable):
tagname = "gradFill"
flip = NoneSet(values=(['x', 'y', 'xy']))
rotWithShape = Bool(allow_none=True)
gsLst = Typed(expected_type=GradientStopList, allow_none=True)
stop_list = Alias("gsLst")
lin = Typed(expected_type=LinearShadeProperties, allow_none=True)
linear = Alias("lin")
path = Typed(expected_type=PathShadeProperties, allow_none=True)
tileRect = Typed(expected_type=RelativeRect, allow_none=True)
__elements__ = ('gsLst', 'lin', 'path', 'tileRect')
def __init__(self,
flip=None,
rotWithShape=None,
gsLst=None,
lin=None,
path=None,
tileRect=None,
):
self.flip = flip
self.rotWithShape = rotWithShape
self.gsLst = gsLst
self.lin = lin
self.path = path
self.tileRect = tileRect
class Blip(Serialisable):
tagname = "blip"
namespace = DRAWING_NS
#Using attribute groupAG_Blob
cstate = NoneSet(values=(['email', 'screen', 'print', 'hqprint']))
embed = Relation() #rId
link = Relation() #hyperlink
noGrp = Bool(allow_none=True)
noSelect = Bool(allow_none=True)
noRot = Bool(allow_none=True)
noChangeAspect = Bool(allow_none=True)
noMove = Bool(allow_none=True)
noResize = Bool(allow_none=True)
noEditPoints = Bool(allow_none=True)
noAdjustHandles = Bool(allow_none=True)
noChangeArrowheads = Bool(allow_none=True)
noChangeShapeType = Bool(allow_none=True)
# some elements are choice
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
alphaBiLevel = Typed(expected_type=AlphaBiLevelEffect, allow_none=True)
alphaCeiling = Typed(expected_type=AlphaCeilingEffect, allow_none=True)
alphaFloor = Typed(expected_type=AlphaFloorEffect, allow_none=True)
alphaInv = Typed(expected_type=AlphaInverseEffect, allow_none=True)
alphaMod = Typed(expected_type=AlphaModulateEffect, allow_none=True)
alphaModFix = Typed(expected_type=AlphaModulateFixedEffect, allow_none=True)
alphaRepl = Typed(expected_type=AlphaReplaceEffect, allow_none=True)
biLevel = Typed(expected_type=BiLevelEffect, allow_none=True)
blur = Typed(expected_type=BlurEffect, allow_none=True)
clrChange = Typed(expected_type=ColorChangeEffect, allow_none=True)
clrRepl = Typed(expected_type=ColorReplaceEffect, allow_none=True)
duotone = Typed(expected_type=DuotoneEffect, allow_none=True)
fillOverlay = Typed(expected_type=FillOverlayEffect, allow_none=True)
grayscl = Typed(expected_type=GrayscaleEffect, allow_none=True)
hsl = Typed(expected_type=HSLEffect, allow_none=True)
lum = Typed(expected_type=LuminanceEffect, allow_none=True)
tint = Typed(expected_type=TintEffect, allow_none=True)
__elements__ = ('alphaBiLevel', 'alphaCeiling', 'alphaFloor', 'alphaInv',
'alphaMod', 'alphaModFix', 'alphaRepl', 'biLevel', 'blur', 'clrChange',
'clrRepl', 'duotone', 'fillOverlay', 'grayscl', 'hsl', 'lum', 'tint')
def __init__(self,
cstate=None,
embed=None,
link=None,
noGrp=None,
noSelect=None,
noRot=None,
noChangeAspect=None,
noMove=None,
noResize=None,
noEditPoints=None,
noAdjustHandles=None,
noChangeArrowheads=None,
noChangeShapeType=None,
extLst=None,
alphaBiLevel=None,
alphaCeiling=None,
alphaFloor=None,
alphaInv=None,
alphaMod=None,
alphaModFix=None,
alphaRepl=None,
biLevel=None,
blur=None,
clrChange=None,
clrRepl=None,
duotone=None,
fillOverlay=None,
grayscl=None,
hsl=None,
lum=None,
tint=None,
):
self.cstate = cstate
self.embed = embed
self.link = link
self.noGrp = noGrp
self.noSelect = noSelect
self.noRot = noRot
self.noChangeAspect = noChangeAspect
self.noMove = noMove
self.noResize = noResize
self.noEditPoints = noEditPoints
self.noAdjustHandles = noAdjustHandles
self.noChangeArrowheads = noChangeArrowheads
self.noChangeShapeType = noChangeShapeType
self.extLst = extLst
self.alphaBiLevel = alphaBiLevel
self.alphaCeiling = alphaCeiling
self.alphaFloor = alphaFloor
self.alphaInv = alphaInv
self.alphaMod = alphaMod
self.alphaModFix = alphaModFix
self.alphaRepl = alphaRepl
self.biLevel = biLevel
self.blur = blur
self.clrChange = clrChange
self.clrRepl = clrRepl
self.duotone = duotone
self.fillOverlay = fillOverlay
self.grayscl = grayscl
self.hsl = hsl
self.lum = lum
self.tint = tint
class TileInfoProperties(Serialisable):
tx = Integer(allow_none=True)
ty = Integer(allow_none=True)
sx = Integer(allow_none=True)
sy = Integer(allow_none=True)
flip = NoneSet(values=(['x', 'y', 'xy']))
algn = Set(values=(['tl', 't', 'tr', 'l', 'ctr', 'r', 'bl', 'b', 'br']))
def __init__(self,
tx=None,
ty=None,
sx=None,
sy=None,
flip=None,
algn=None,
):
self.tx = tx
self.ty = ty
self.sx = sx
self.sy = sy
self.flip = flip
self.algn = algn
class BlipFillProperties(Serialisable):
tagname = "blipFill"
dpi = Integer(allow_none=True)
rotWithShape = Bool(allow_none=True)
blip = Typed(expected_type=Blip, allow_none=True)
srcRect = Typed(expected_type=RelativeRect, allow_none=True)
tile = Typed(expected_type=TileInfoProperties, allow_none=True)
stretch = Typed(expected_type=StretchInfoProperties, allow_none=True)
__elements__ = ("blip", "srcRect", "tile", "stretch")
def __init__(self,
dpi=None,
rotWithShape=None,
blip=None,
tile=None,
stretch=None,
srcRect=None,
):
self.dpi = dpi
self.rotWithShape = rotWithShape
self.blip = blip
self.tile = tile
self.stretch = stretch
self.srcRect = srcRect
| apache-2.0 |
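The classes above are declarative wrappers around DrawingML fill elements. As a rough, hypothetical sketch (not taken from the original module), they can be instantiated directly with the keyword arguments shown in their `__init__` signatures; the specific preset, stop positions, and angle below are illustrative choices only.

```python
# Hypothetical construction sketch for the fill classes defined above.
from openpyxl.drawing.fill import (
    PatternFillProperties,
    GradientFillProperties,
    GradientStop,
    GradientStopList,
    LinearShadeProperties,
)

# A preset 50% pattern fill; fgClr/bgClr would take ColorChoice values.
pattern = PatternFillProperties(prst='pct50')

# A two-stop linear gradient; per the descriptors above, stop positions are
# constrained to 0-100, and the shade angle is an integer
# (DrawingML expresses angles in 60000ths of a degree, so 5400000 is 90 degrees).
stops = GradientStopList(gs=[GradientStop(pos=0), GradientStop(pos=100)])
gradient = GradientFillProperties(gsLst=stops, lin=LinearShadeProperties(ang=5400000))
```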
django-danceschool/django-danceschool | danceschool/discounts/tests.py | 1 | 20249 | from django.urls import reverse
from django.utils import timezone
from datetime import timedelta
from danceschool.core.constants import REG_VALIDATION_STR, updateConstant
from danceschool.core.utils.tests import DefaultSchoolTestCase
from danceschool.core.models import Invoice, Registration
from .models import (
PointGroup, PricingTierGroup, DiscountCategory, DiscountCombo, DiscountComboComponent
)
class BaseDiscountsTest(DefaultSchoolTestCase):
def create_discount(self, **kwargs):
'''
This method just creates the necessary objects to create a simple discount
with a single required component.
'''
test_group, created = PointGroup.objects.get_or_create(
name=kwargs.get('pointGroupName', 'Test points')
)
pt_group, created = PricingTierGroup.objects.get_or_create(
group=test_group,
pricingTier=self.defaultPricing,
points=kwargs.get('pricingTierGroupPoints', 5),
)
# Create a flat price combo that just knocks $5 off the regular price
test_combo = DiscountCombo(
name=kwargs.get('name', 'Test Discount'),
category=kwargs.get('category', DiscountCategory.objects.get(id=1)),
discountType=kwargs.get('discountType', DiscountCombo.DiscountType.flatPrice),
onlinePrice=kwargs.get('onlinePrice', self.defaultPricing.onlinePrice - 5),
doorPrice=kwargs.get('doorPrice', self.defaultPricing.doorPrice - 5),
dollarDiscount=kwargs.get('dollarDiscount', 10),
percentDiscount=kwargs.get('percentDiscount', 50),
percentUniversallyApplied=kwargs.get('percentUniversallyApplied', False),
active=kwargs.get('active', True),
newCustomersOnly=kwargs.get('newCustomersOnly', False),
daysInAdvanceRequired=kwargs.get('daysInAdvanceRequired', None),
expirationDate=kwargs.get('expirationDate', None),
)
test_combo.save()
test_component = DiscountComboComponent.objects.create(
discountCombo=test_combo,
pointGroup=test_group,
quantity=kwargs.get('quantity', 5),
allWithinPointGroup=kwargs.get('allWithinPointGroup', False),
)
return (test_combo, test_component)
def register_to_check_discount(self, series, expected_amount=None):
'''
This method makes it easy to determine whether discounts are working
correctly for a single class registration
'''
s = series
response = self.client.get(reverse('registration'))
self.assertEqual(response.status_code, 200)
self.assertIn(s, response.context_data.get('regOpenSeries'))
# Sign up for the series, and check that we proceed to the student information page.
# Because of the way that roles are encoded on this form, we just grab the value to pass
# from the form itself.
post_data = {'series_%s_%s' % (
s.id, response.context_data['form'].fields['series_%s' % s.id].field_choices[0].get('value')
): [1,]}
response = self.client.post(reverse('registration'), post_data, follow=True)
self.assertEqual(response.redirect_chain, [(reverse('getStudentInfo'), 302)])
invoice = Invoice.objects.get(
id=self.client.session[REG_VALIDATION_STR].get('invoiceId')
)
tr = Registration.objects.filter(invoice=invoice).first()
self.assertTrue(tr.eventregistration_set.filter(event__id=s.id).exists())
self.assertFalse(tr.final)
# Check that the student info page lists the correct subtotal with
# the discount applied
self.assertEqual(invoice.grossTotal, s.getBasePrice())
if expected_amount is not None:
self.assertEqual(response.context_data.get('invoice').total, expected_amount)
# Continue to the summary page
post_data = {
'firstName': 'Discounted',
'lastName': 'Customer',
'email': '[email protected]',
'agreeToPolicies': True,
}
return self.client.post(reverse('getStudentInfo'), post_data, follow=True)
class DiscountsConditionsTest(BaseDiscountsTest):
def test_inactive_discount(self):
'''
Make a discount inactive and make sure that it doesn't work
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(active=False)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_expired_discount(self):
'''
Create an expired discount and make sure that it doesn't work.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
expirationDate=timezone.now() + timedelta(days=-1)
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_discounts_disabled(self):
''' Disable discounts and check that they don't work anymore '''
updateConstant('general__discountsEnabled', False)
test_combo, test_component = self.create_discount()
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_notenoughpoints(self):
'''
Set the discount's components so that this discount is too small to apply, and
check that it doesn't get applied.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(quantity=10)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_noearlybird(self):
'''
Create an early registration discount that requires three day
advance registration and ensure that it does not work less than
three days in advance.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(daysInAdvanceRequired=3)
s = self.create_series(
pricingTier=self.defaultPricing,
startTime=timezone.now() + timedelta(days=1)
)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertFalse(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
class DiscountsTypesTest(BaseDiscountsTest):
def test_discount_applies(self):
'''
Create a flat $5 discount and test that it applies
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount()
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice() - 5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 5
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 5)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_earlybird(self):
'''
Create an early registration discount that requires three day
advance registration and ensure that it works more than
three days in advance.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(daysInAdvanceRequired=3)
s = self.create_series(
pricingTier=self.defaultPricing,
startTime=timezone.now() + timedelta(days=4)
)
response = self.register_to_check_discount(s, s.getBasePrice() - 5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 5
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 5)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_allwithinpointgroup(self):
'''
Set a discount to apply to an entire point group and check that the price
is still the flat price
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(quantity=1, allWithinPointGroup=True)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice() - 5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 5
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 5)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_dollarDiscount(self):
'''
Create a $10 off discount and check that it applies appropriately
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=10
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice() - 10)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 10
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 10)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_percentDiscount(self):
'''
Create a 50% off discount and check that it applies correctly.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.percentDiscount,
percentDiscount=50,
percentUniversallyApplied=False
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice()*0.5)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, 0.5 * invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(
response.context_data.get('total_discount_amount'),
0.5 * invoice.grossTotal
)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
def test_addOnItem(self):
'''
Create a free add-on item and ensure that it is applied correctly.
'''
updateConstant('general__discountsEnabled', True)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.addOn,
name='Test Free Add-On',
)
s = self.create_series(pricingTier=self.defaultPricing)
response = self.register_to_check_discount(s, s.getBasePrice())
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 0)
self.assertTrue(response.context_data.get('addonItems'))
self.assertFalse(response.context_data.get('discount_codes'))
def test_discountmakesitfree(self):
'''
Make the dollar discount larger than the base price and check that
the registration is free, that the registration is processed and that
a $0 invoice is created.
'''
updateConstant('general__discountsEnabled', True)
s = self.create_series(pricingTier=self.defaultPricing)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=s.getBasePrice() + 10
)
response = self.register_to_check_discount(s, 0)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(invoice.total, 0)
self.assertEqual(response.context_data.get('zero_balance'), True)
self.assertEqual(response.context_data.get('total_discount_amount'), s.getBasePrice())
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [test_combo.name, ])
# Since the above registration was free, check that the registration actually
# processed, and that there exists a paid Invoice for $0
finalReg = response.context_data.get('registration')
invoice = response.context_data.get('invoice')
self.assertTrue(finalReg)
self.assertTrue(finalReg.invoice)
self.assertEqual(finalReg.invoice, invoice)
self.assertTrue(invoice.status == Invoice.PaymentStatus.paid)
self.assertEqual(invoice.outstandingBalance, 0)
self.assertEqual(invoice.total, 0)
self.assertTrue(finalReg.final)
# Check that the invoice no longer has an expiration date
self.assertIsNone(invoice.expirationDate)
# Check that the RegistrationDiscount associated with this registration
# has been applied.
self.assertTrue(finalReg.registrationdiscount_set.first().applied)
# Show that multiple registrations by the same customer are not permitted
response = self.register_to_check_discount(s)
self.assertIn(
'You are already registered for',
' '.join(response.context_data['form'].errors.get('__all__'))
)
def test_largerdiscountapplies(self):
'''
Create both a $10 discount and a $20 discount, and ensure that the
larger discount applies
'''
updateConstant('general__discountsEnabled', True)
s = self.create_series(pricingTier=self.defaultPricing)
test_combo, test_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=10
)
bigger_combo, bigger_component = self.create_discount(
discountType=DiscountCombo.DiscountType.dollarDiscount,
dollarDiscount=20,
name='Bigger Discount'
)
response = self.register_to_check_discount(s, s.getBasePrice() - 20)
invoice = response.context_data.get('invoice')
self.assertEqual(response.redirect_chain, [(reverse('showRegSummary'), 302)])
self.assertEqual(invoice.grossTotal, s.getBasePrice())
self.assertEqual(
invoice.total, invoice.grossTotal - 20
)
self.assertEqual(response.context_data.get('zero_balance'), False)
self.assertEqual(response.context_data.get('total_discount_amount'), 20)
self.assertFalse(response.context_data.get('addonItems'))
discount_codes = response.context_data.get('discount_codes')
self.assertEqual([x[0] for x in discount_codes], [bigger_combo.name, ])
| bsd-3-clause |
lsinfo/odoo | addons/l10n_ma/__init__.py | 430 | 1071 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import l10n_ma
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ISBX/isbx-loopback-cms | vendor/js-beautify/python/jsbeautifier/__init__.py | 9 | 83180 | from __future__ import print_function
import sys
import os
import getopt
import re
import string
import errno
import copy
from jsbeautifier.__version__ import __version__
#
# The MIT License (MIT)
# Copyright (c) 2007-2013 Einar Lielmanis and contributors.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Originally written by Einar Lielmanis et al.,
# Conversion to python by Einar Lielmanis, [email protected],
# Parsing improvement for brace-less and semicolon-less statements
# by Liam Newman <[email protected]>
# Python is not my native language, feel free to push things around.
#
# Use either from command line (script displays its usage when run
# without any parameters),
#
#
# or, alternatively, use it as a module:
#
# import jsbeautifier
# res = jsbeautifier.beautify('your javascript string')
# res = jsbeautifier.beautify_file('some_file.js')
#
# you may specify some options:
#
# opts = jsbeautifier.default_options()
# opts.indent_size = 2
# res = jsbeautifier.beautify('some javascript', opts)
#
#
# Here are the available options: (read source)
class BeautifierOptions:
def __init__(self):
self.indent_size = 4
self.indent_char = ' '
self.indent_with_tabs = False
self.eol = '\n'
self.preserve_newlines = True
self.max_preserve_newlines = 10
self.space_in_paren = False
self.space_in_empty_paren = False
self.e4x = False
self.jslint_happy = False
self.space_after_anon_function = False
self.brace_style = 'collapse'
self.keep_array_indentation = False
self.keep_function_indentation = False
self.eval_code = False
self.unescape_strings = False
self.wrap_line_length = 0
self.break_chained_methods = False
self.end_with_newline = False
self.comma_first = False
# For testing of beautify ignore:start directive
self.test_output_raw = False
def __repr__(self):
return \
"""indent_size = %d
indent_char = [%s]
preserve_newlines = %s
max_preserve_newlines = %d
space_in_paren = %s
jslint_happy = %s
space_after_anon_function = %s
indent_with_tabs = %s
brace_style = %s
keep_array_indentation = %s
eval_code = %s
wrap_line_length = %s
unescape_strings = %s
""" % ( self.indent_size,
self.indent_char,
self.preserve_newlines,
self.max_preserve_newlines,
self.space_in_paren,
self.jslint_happy,
self.space_after_anon_function,
self.indent_with_tabs,
self.brace_style,
self.keep_array_indentation,
self.eval_code,
self.wrap_line_length,
self.unescape_strings,
)
class BeautifierFlags:
def __init__(self, mode):
self.mode = mode
self.parent = None
self.last_text = ''
self.last_word = ''
self.declaration_statement = False
self.declaration_assignment = False
self.multiline_frame = False
self.if_block = False
self.else_block = False
self.do_block = False
self.do_while = False
self.in_case = False
self.in_case_statement = False
self.case_body = False
self.indentation_level = 0
self.line_indent_level = 0
self.start_line_index = 0
self.ternary_depth = 0
def apply_base(self, flags_base, added_newline):
next_indent_level = flags_base.indentation_level
if not added_newline and \
flags_base.line_indent_level > next_indent_level:
next_indent_level = flags_base.line_indent_level
self.parent = flags_base
self.last_text = flags_base.last_text
self.last_word = flags_base.last_word
self.indentation_level = next_indent_level
class Acorn:
def __init__(self):
# This is not pretty, but given how we did the version import
# it is the only way to do this without having setup.py fail on a missing six dependency.
self.six = __import__("six")
# This section of code was translated to python from acorn (javascript).
#
# Acorn was written by Marijn Haverbeke and released under an MIT
# license. The Unicode regexps (for identifiers and whitespace) were
# taken from [Esprima](http://esprima.org) by Ariya Hidayat.
#
# Git repositories for Acorn are available at
#
# http://marijnhaverbeke.nl/git/acorn
# https://github.com/marijnh/acorn.git
# ## Character categories
# Big ugly regular expressions that match characters in the
# whitespace, identifier, and identifier-start categories. These
# are only applied when a character is found to actually have a
# code point above 128.
self.nonASCIIwhitespace = re.compile(self.six.u("[\u1680\u180e\u2000-\u200a\u202f\u205f\u3000\ufeff]"))
        self.nonASCIIidentifierStartChars = self.six.u("\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc")
self.nonASCIIidentifierChars = self.six.u("\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u0620-\u0649\u0672-\u06d3\u06e7-\u06e8\u06fb-\u06fc\u0730-\u074a\u0800-\u0814\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0840-\u0857\u08e4-\u08fe\u0900-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09d7\u09df-\u09e0\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5f-\u0b60\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62-\u0c63\u0c66-\u0c6f\u0c82\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2-\u0ce3\u0ce6-\u0cef\u0d02\u0d03\u0d46-\u0d48\u0d57\u0d62-\u0d63\u0d66-\u0d6f\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e34-\u0e3a\u0e40-\u0e45\u0e50-\u0e59\u0eb4-\u0eb9\u0ec8-\u0ecd\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f41-\u0f47\u0f71-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1029\u1040-\u1049\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f-\u109d\u135d-\u135f\u170e-\u1710\u1720-\u1730\u1740-\u1750\u1772\u1773\u1780-\u17b2\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1920-\u192b\u1930-\u193b\u1951-\u196d\u19b0-\u19c0\u19c8-\u19c9\u19d0-\u19d9\u1a00-\u1a15\u1a20-\u1a53\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1b46-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1bb0-\u1bb9\u1be6-\u1bf3\u1c00-\u1c22\u1c40-\u1c49\u1c5b-\u1c7d\u1cd0-\u1cd2\u1d00-\u1dbe\u1e01-\u1f15\u200c\u200d\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2d81-\u2d96\u2de0-\u2dff\u3021-\u3028\u3099\u309a\ua640-\ua66d\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua7f8-\ua800\ua806\ua80b\ua823-\ua827\ua880-\ua881\ua8b4-\ua8c4\ua8d0-\ua8d9\ua8f3-\ua8f7\ua900-\ua909\ua926-\ua92d\ua930-\ua945\ua980-\ua983\ua9b3-\ua9c0\uaa00-\uaa27\uaa40-\uaa41\uaa4c-\uaa4d\uaa50-\uaa59\uaa7b\uaae0-\uaae9\uaaf2-\uaaf3\uabc0-\uabe1\uabec\uabed\uabf0-\uabf9\ufb20-\ufb28\ufe00-\ufe0f\ufe20-\ufe26\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f")
self.nonASCIIidentifierStart = re.compile("[" + self.nonASCIIidentifierStartChars + "]")
self.nonASCIIidentifier = re.compile("[" + self.nonASCIIidentifierStartChars + self.nonASCIIidentifierChars + "]")
# Whether a single character denotes a newline.
self.newline = re.compile(self.six.u("[\n\r\u2028\u2029]"))
# Matches a whole line break (where CRLF is considered a single
# line break). Used to count lines.
self.lineBreak = re.compile(self.six.u("\r\n|[\n\r\u2028\u2029]"))
# Test whether a given character code starts an identifier.
def isIdentifierStart(self, code):
if code < 65:
return code == 36
if code < 91:
return True
if code < 97:
return code == 95
if code < 123:
return True
return code >= 0xaa and self.nonASCIIidentifierStart.match(self.six.unichr(code)) != None
# Test whether a given character is part of an identifier.
def isIdentifierChar(self, code):
if code < 48:
return code == 36
if code < 58:
return True
if code < 65:
return False
if code < 91:
return True
if code < 97:
return code == 95
if code < 123:
return True
return code >= 0xaa and self.nonASCIIidentifier.match(self.six.unichr(code)) != None
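# A quick illustration of the ASCII fast paths above (sketch):
# isIdentifierStart(ord('$')) is True and isIdentifierChar(ord('-')) is False,
# which is why '$foo' tokenizes as one word while 'a-b' breaks at the operator.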
class Token:
def __init__(self, type, text, newlines = 0, whitespace_before = '', mode = None, parent = None):
self.type = type
self.text = text
self.comments_before = []
self.newlines = newlines
self.wanted_newline = newlines > 0
self.whitespace_before = whitespace_before
self.parent = None
self.directives = None
def default_options():
return BeautifierOptions()
def beautify(string, opts = default_options() ):
b = Beautifier()
return b.beautify(string, opts)
def beautify_file(file_name, opts = default_options() ):
if file_name == '-': # stdin
stream = sys.stdin
else:
stream = open(file_name)
return beautify(''.join(stream.readlines()), opts)
def usage(stream=sys.stdout):
print("jsbeautifier.py@" + __version__ + """
Javascript beautifier (http://jsbeautifier.org/)
Usage: jsbeautifier.py [options] <infile>
<infile> can be "-", which means stdin.
<outfile> defaults to stdout
Input options:
-i, --stdin read input from stdin
Output options:
-s, --indent-size=NUMBER indentation size. (default 4).
-c, --indent-char=CHAR character to indent with. (default space).
-e, --eol=STRING character(s) to use as line terminators. (default newline - "\\n")
-t, --indent-with-tabs Indent with tabs, overrides -s and -c
-d, --disable-preserve-newlines do not preserve existing line breaks.
-P, --space-in-paren add padding spaces within paren, ie. f( a, b )
-E, --space-in-empty-paren Add a single space inside empty paren, ie. f( )
-j, --jslint-happy more jslint-compatible output
-a, --space_after_anon_function add a space before an anonymous function's parens, ie. function ()
-b, --brace-style=collapse brace style (collapse, expand, end-expand)
-k, --keep-array-indentation keep array indentation.
-r, --replace write output in-place, replacing input
-o, --outfile=FILE specify a file to output to (default stdout)
-f, --keep-function-indentation Do not re-indent function bodies defined in var lines.
-x, --unescape-strings Decode printable chars encoded in \\xNN notation.
-X, --e4x Pass E4X xml literals through untouched
-w, --wrap-line-length Attempt to wrap line when it exceeds this length.
NOTE: Line continues until next wrap point is found.
-n, --end_with_newline End output with newline
Rarely needed options:
--eval-code evaluate code if a JS interpreter is
installed. May be useful with some obfuscated
script but poses a potential security issue.
-l, --indent-level=NUMBER initial indentation level. (default 0).
-h, --help, --usage prints this help statement.
-v, --version Show the version
""", file=stream)
if stream == sys.stderr:
return 1
else:
return 0
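# Illustrative invocation only (the file names are hypothetical); the flags
# map to the options documented in usage() above:
#
# python jsbeautifier.py -s 2 -P -n -o pretty.js messy.js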
class MODE:
BlockStatement, Statement, ObjectLiteral, ArrayLiteral, \
ForInitializer, Conditional, Expression = range(7)
class Beautifier:
def __init__(self, opts = default_options() ):
self.opts = copy.copy(opts)
self.blank_state()
self.acorn = Acorn()
def blank_state(self, js_source_text = None):
# internal flags
self.flags = None
self.previous_flags = None
self.flag_store = []
self.tokens = []
self.token_pos = 0
# force opts.space_after_anon_function to true if opts.jslint_happy
if self.opts.jslint_happy:
self.opts.space_after_anon_function = True
if self.opts.indent_with_tabs:
self.opts.indent_char = "\t"
self.opts.indent_size = 1
self.opts.eol = self.opts.eol.replace('\\r', '\r').replace('\\n', '\n')
self.indent_string = self.opts.indent_char * self.opts.indent_size
self.baseIndentString = ''
self.last_type = 'TK_START_BLOCK' # last token type
self.last_last_text = '' # pre-last token text
preindent_index = 0;
if not js_source_text == None and len(js_source_text) > 0:
while preindent_index < len(js_source_text) and \
js_source_text[preindent_index] in [' ', '\t'] :
self.baseIndentString += js_source_text[preindent_index]
preindent_index += 1
js_source_text = js_source_text[preindent_index:]
self.output = Output(self.indent_string, self.baseIndentString)
# If testing the ignore directive, start with output disable set to true
self.output.raw = self.opts.test_output_raw;
self.set_mode(MODE.BlockStatement)
return js_source_text
def beautify(self, s, opts = None ):
if opts != None:
self.opts = copy.copy(opts)
if self.opts.brace_style not in ['expand', 'collapse', 'end-expand', 'none']:
raise(Exception('opts.brace_style must be "expand", "collapse", "end-expand", or "none".'))
s = self.blank_state(s)
input = self.unpack(s, self.opts.eval_code)
self.handlers = {
'TK_START_EXPR': self.handle_start_expr,
'TK_END_EXPR': self.handle_end_expr,
'TK_START_BLOCK': self.handle_start_block,
'TK_END_BLOCK': self.handle_end_block,
'TK_WORD': self.handle_word,
'TK_RESERVED': self.handle_word,
'TK_SEMICOLON': self.handle_semicolon,
'TK_STRING': self.handle_string,
'TK_EQUALS': self.handle_equals,
'TK_OPERATOR': self.handle_operator,
'TK_COMMA': self.handle_comma,
'TK_BLOCK_COMMENT': self.handle_block_comment,
'TK_COMMENT': self.handle_comment,
'TK_DOT': self.handle_dot,
'TK_UNKNOWN': self.handle_unknown,
'TK_EOF': self.handle_eof
}
self.tokens = Tokenizer(input, self.opts, self.indent_string).tokenize()
self.token_pos = 0
while not self.get_token() == None:
local_token = self.get_token()
for comment_token in local_token.comments_before:
# The cleanest handling of inline comments is to treat them as though they aren't there.
# Just continue formatting and the behavior should be logical.
# Also ignore unknown tokens. Again, this should result in better behavior.
self.handle_token(comment_token)
self.handle_token(local_token)
self.last_last_text = self.flags.last_text
self.last_type = local_token.type
self.flags.last_text = local_token.text
self.token_pos += 1
sweet_code = self.output.get_code()
if self.opts.end_with_newline:
sweet_code += '\n'
if not self.opts.eol == '\n':
sweet_code = sweet_code.replace('\n', self.opts.eol)
return sweet_code
def handle_token(self, local_token):
newlines = local_token.newlines
keep_whitespace = self.opts.keep_array_indentation and self.is_array(self.flags.mode)
if keep_whitespace:
for i in range(newlines):
self.print_newline(i > 0)
else: # not keep_whitespace
if self.opts.max_preserve_newlines != 0 and newlines > self.opts.max_preserve_newlines:
newlines = self.opts.max_preserve_newlines
if self.opts.preserve_newlines and newlines > 1:
self.print_newline()
for i in range(1, newlines):
self.print_newline(True)
self.handlers[local_token.type](local_token)
def unpack(self, source, evalcode=False):
import jsbeautifier.unpackers as unpackers
try:
return unpackers.run(source, evalcode)
except unpackers.UnpackingError as error:
print('error:', error)
return ''
def is_special_word(self, s):
return s in ['case', 'return', 'do', 'if', 'throw', 'else']
def is_array(self, mode):
return mode == MODE.ArrayLiteral
def is_expression(self, mode):
return mode in [MODE.Expression, MODE.ForInitializer, MODE.Conditional]
def allow_wrap_or_preserved_newline(self, current_token, force_linewrap = False):
# never wrap the first token of a line.
if self.output.just_added_newline():
return
if (self.opts.preserve_newlines and current_token.wanted_newline) or force_linewrap:
self.print_newline(preserve_statement_flags = True)
elif self.opts.wrap_line_length > 0:
proposed_line_length = self.output.current_line.get_character_count() + len(current_token.text)
if self.output.space_before_token:
proposed_line_length += 1
if proposed_line_length >= self.opts.wrap_line_length:
self.print_newline(preserve_statement_flags = True)
def print_newline(self, force_newline = False, preserve_statement_flags = False):
if not preserve_statement_flags:
if self.flags.last_text != ';' and self.flags.last_text != ',' and self.flags.last_text != '=' and self.last_type != 'TK_OPERATOR':
while self.flags.mode == MODE.Statement and not self.flags.if_block and not self.flags.do_block:
self.restore_mode()
if self.output.add_new_line(force_newline):
self.flags.multiline_frame = True
def print_token_line_indentation(self, current_token):
if self.output.just_added_newline():
line = self.output.current_line
if self.opts.keep_array_indentation and self.is_array(self.flags.mode) and current_token.wanted_newline:
line.push(current_token.whitespace_before)
self.output.space_before_token = False
elif self.output.set_indent(self.flags.indentation_level):
self.flags.line_indent_level = self.flags.indentation_level
def print_token(self, current_token, s=None):
if self.output.raw:
self.output.add_raw_token(current_token)
return
if self.opts.comma_first and self.last_type == 'TK_COMMA' and self.output.just_added_newline():
if self.output.previous_line.last() == ',':
self.output.previous_line.pop()
self.print_token_line_indentation(current_token)
self.output.add_token(',')
self.output.space_before_token = True
if s == None:
s = current_token.text
self.print_token_line_indentation(current_token)
self.output.add_token(s);
def indent(self):
self.flags.indentation_level += 1
def deindent(self):
allow_deindent = self.flags.indentation_level > 0 and ((self.flags.parent == None) or self.flags.indentation_level > self.flags.parent.indentation_level)
if allow_deindent:
self.flags.indentation_level -= 1
def set_mode(self, mode):
if self.flags:
self.flag_store.append(self.flags)
self.previous_flags = self.flags
else:
self.previous_flags = BeautifierFlags(mode)
self.flags = BeautifierFlags(mode)
self.flags.apply_base(self.previous_flags, self.output.just_added_newline())
self.flags.start_line_index = self.output.get_line_number();
def restore_mode(self):
if len(self.flag_store) > 0:
self.previous_flags = self.flags
self.flags = self.flag_store.pop()
if self.previous_flags.mode == MODE.Statement:
self.output.remove_redundant_indentation(self.previous_flags)
def start_of_object_property(self):
return self.flags.parent.mode == MODE.ObjectLiteral and self.flags.mode == MODE.Statement and \
((self.flags.last_text == ':' and self.flags.ternary_depth == 0) or (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['get', 'set']))
def start_of_statement(self, current_token):
if (
(self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and current_token.type == 'TK_WORD') \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'do') \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'return' and not current_token.wanted_newline) \
or (self.last_type == 'TK_RESERVED' and self.flags.last_text == 'else' and not (current_token.type == 'TK_RESERVED' and current_token.text == 'if' )) \
or (self.last_type == 'TK_END_EXPR' and (self.previous_flags.mode == MODE.ForInitializer or self.previous_flags.mode == MODE.Conditional)) \
or (self.last_type == 'TK_WORD' and self.flags.mode == MODE.BlockStatement \
and not self.flags.in_case
and not (current_token.text == '--' or current_token.text == '++')
and self.last_last_text != 'function'
and current_token.type != 'TK_WORD' and current_token.type != 'TK_RESERVED') \
or (self.flags.mode == MODE.ObjectLiteral and \
((self.flags.last_text == ':' and self.flags.ternary_depth == 0) or (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['get', 'set'])))
):
self.set_mode(MODE.Statement)
self.indent()
if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and current_token.type == 'TK_WORD':
self.flags.declaration_statement = True
# Issue #276:
# If starting a new statement with [if, for, while, do], push to a new line.
# if (a) if (b) if(c) d(); else e(); else f();
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(current_token, current_token.type == 'TK_RESERVED' and current_token.text in ['do', 'for', 'if', 'while'])
return True
else:
return False
def get_token(self, offset = 0):
index = self.token_pos + offset
if index < 0 or index >= len(self.tokens):
return None
else:
return self.tokens[index]
def handle_start_expr(self, current_token):
if self.start_of_statement(current_token):
# The conditional starts the statement if appropriate.
pass
next_mode = MODE.Expression
if current_token.text == '[':
if self.last_type == 'TK_WORD' or self.flags.last_text == ')':
if self.last_type == 'TK_RESERVED' and self.flags.last_text in Tokenizer.line_starters:
self.output.space_before_token = True
self.set_mode(next_mode)
self.print_token(current_token)
self.indent()
if self.opts.space_in_paren:
self.output.space_before_token = True
return
next_mode = MODE.ArrayLiteral
if self.is_array(self.flags.mode):
if self.flags.last_text == '[' or (
self.flags.last_text == ',' and (self.last_last_text == ']' or self.last_last_text == '}')):
# ], [ goes to a new line
# }, [ goes to a new line
if not self.opts.keep_array_indentation:
self.print_newline()
else:
if self.last_type == 'TK_RESERVED' and self.flags.last_text == 'for':
next_mode = MODE.ForInitializer
elif self.last_type == 'TK_RESERVED' and self.flags.last_text in ['if', 'while']:
next_mode = MODE.Conditional
else:
next_mode = MODE.Expression
if self.flags.last_text == ';' or self.last_type == 'TK_START_BLOCK':
self.print_newline()
elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.flags.last_text == '.':
# do nothing on (( and )( and ][ and ]( and .(
# TODO: Consider whether forcing this is required. Review failing tests when removed.
self.allow_wrap_or_preserved_newline(current_token, current_token.wanted_newline)
elif not (self.last_type == 'TK_RESERVED' and current_token.text == '(') and self.last_type not in ['TK_WORD', 'TK_OPERATOR']:
self.output.space_before_token = True
elif (self.last_type == 'TK_RESERVED' and (self.flags.last_word == 'function' or self.flags.last_word == 'typeof')) or \
(self.flags.last_text == '*' and self.last_last_text =='function'):
# function() vs function (), typeof() vs typeof ()
if self.opts.space_after_anon_function:
self.output.space_before_token = True
elif self.last_type == 'TK_RESERVED' and (self.flags.last_text in Tokenizer.line_starters or self.flags.last_text == 'catch'):
# TODO: option space_before_conditional
self.output.space_before_token = True
elif current_token.text == '(' and self.last_type == 'TK_RESERVED' and self.flags.last_word == 'await':
self.output.space_before_token = True
# Support of this kind of newline preservation:
# a = (b &&
# (c || d));
if self.last_type in ['TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(current_token)
self.set_mode(next_mode)
self.print_token(current_token)
if self.opts.space_in_paren:
self.output.space_before_token = True
# In all cases, if we newline while inside an expression it should be indented.
self.indent()
def handle_end_expr(self, current_token):
# statements inside expressions are not valid syntax, but...
# statements must all be closed when their container closes
while self.flags.mode == MODE.Statement:
self.restore_mode()
if self.flags.multiline_frame:
self.allow_wrap_or_preserved_newline(current_token, current_token.text == ']' and self.is_array(self.flags.mode) and not self.opts.keep_array_indentation)
if self.opts.space_in_paren:
if self.last_type == 'TK_START_EXPR' and not self.opts.space_in_empty_paren:
# empty parens are always "()" and "[]", not "( )" or "[ ]"
self.output.space_before_token = False
self.output.trim()
else:
self.output.space_before_token = True
if current_token.text == ']' and self.opts.keep_array_indentation:
self.print_token(current_token)
self.restore_mode()
else:
self.restore_mode()
self.print_token(current_token)
self.output.remove_redundant_indentation(self.previous_flags)
# do {} while () // no statement required after
if self.flags.do_while and self.previous_flags.mode == MODE.Conditional:
self.previous_flags.mode = MODE.Expression
self.flags.do_block = False
self.flags.do_while = False
def handle_start_block(self, current_token):
# Check if this is a BlockStatement that should be treated as a ObjectLiteral
next_token = self.get_token(1)
second_token = self.get_token(2)
if second_token != None and \
((second_token.text == ':' and next_token.type in ['TK_STRING', 'TK_WORD', 'TK_RESERVED']) \
or (next_token.text in ['get', 'set'] and second_token.type in ['TK_WORD', 'TK_RESERVED'])):
            # We don't support TypeScript, but we didn't break it for a very long time.
# We'll try to keep not breaking it.
if not self.last_last_text in ['class','interface']:
self.set_mode(MODE.ObjectLiteral);
else:
self.set_mode(MODE.BlockStatement)
else:
self.set_mode(MODE.BlockStatement)
empty_braces = (not next_token == None) and len(next_token.comments_before) == 0 and next_token.text == '}'
empty_anonymous_function = empty_braces and self.flags.last_word == 'function' and \
self.last_type == 'TK_END_EXPR'
if self.opts.brace_style == 'expand' or \
(self.opts.brace_style == 'none' and current_token.wanted_newline):
if self.last_type != 'TK_OPERATOR' and \
(empty_anonymous_function or
self.last_type == 'TK_EQUALS' or
(self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text) and self.flags.last_text != 'else')):
self.output.space_before_token = True
else:
self.print_newline(preserve_statement_flags = True)
else: # collapse
if self.last_type not in ['TK_OPERATOR', 'TK_START_EXPR']:
if self.last_type == 'TK_START_BLOCK':
self.print_newline()
else:
self.output.space_before_token = True
else:
# if TK_OPERATOR or TK_START_EXPR
if self.is_array(self.previous_flags.mode) and self.flags.last_text == ',':
if self.last_last_text == '}':
self.output.space_before_token = True
else:
self.print_newline()
self.print_token(current_token)
self.indent()
def handle_end_block(self, current_token):
# statements must all be closed when their container closes
while self.flags.mode == MODE.Statement:
self.restore_mode()
empty_braces = self.last_type == 'TK_START_BLOCK'
if self.opts.brace_style == 'expand':
if not empty_braces:
self.print_newline()
else:
# skip {}
if not empty_braces:
if self.is_array(self.flags.mode) and self.opts.keep_array_indentation:
self.opts.keep_array_indentation = False
self.print_newline()
self.opts.keep_array_indentation = True
else:
self.print_newline()
self.restore_mode()
self.print_token(current_token)
def handle_word(self, current_token):
if current_token.type == 'TK_RESERVED' and self.flags.mode != MODE.ObjectLiteral and \
current_token.text in ['set', 'get']:
current_token.type = 'TK_WORD'
if current_token.type == 'TK_RESERVED' and self.flags.mode == MODE.ObjectLiteral:
next_token = self.get_token(1)
if next_token.text == ':':
current_token.type = 'TK_WORD'
if self.start_of_statement(current_token):
# The conditional starts the statement if appropriate.
pass
elif current_token.wanted_newline and \
not self.is_expression(self.flags.mode) and \
(self.last_type != 'TK_OPERATOR' or (self.flags.last_text == '--' or self.flags.last_text == '++')) and \
self.last_type != 'TK_EQUALS' and \
(self.opts.preserve_newlines or not (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const', 'set', 'get'])):
self.print_newline()
if self.flags.do_block and not self.flags.do_while:
if current_token.type == 'TK_RESERVED' and current_token.text == 'while':
# do {} ## while ()
self.output.space_before_token = True
self.print_token(current_token)
self.output.space_before_token = True
self.flags.do_while = True
return
else:
# do {} should always have while as the next word.
# if we don't see the expected while, recover
self.print_newline()
self.flags.do_block = False
# if may be followed by else, or not
# Bare/inline ifs are tricky
# Need to unwind the modes correctly: if (a) if (b) c(); else d(); else e();
if self.flags.if_block:
if (not self.flags.else_block) and (current_token.type == 'TK_RESERVED' and current_token.text == 'else'):
self.flags.else_block = True
else:
while self.flags.mode == MODE.Statement:
self.restore_mode()
self.flags.if_block = False
if current_token.type == 'TK_RESERVED' and (current_token.text == 'case' or (current_token.text == 'default' and self.flags.in_case_statement)):
self.print_newline()
if self.flags.case_body or self.opts.jslint_happy:
self.flags.case_body = False
self.deindent()
self.print_token(current_token)
self.flags.in_case = True
self.flags.in_case_statement = True
return
if current_token.type == 'TK_RESERVED' and current_token.text == 'function':
if self.flags.last_text in ['}', ';'] or (self.output.just_added_newline() and not self.flags.last_text in ['[', '{', ':', '=', ',']):
# make sure there is a nice clean space of at least one blank line
# before a new function definition, except in arrays
if not self.output.just_added_blankline() and len(current_token.comments_before) == 0:
self.print_newline()
self.print_newline(True)
if self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD':
if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['get', 'set', 'new', 'return', 'export', 'async']:
self.output.space_before_token = True
elif self.last_type == 'TK_RESERVED' and self.flags.last_text == 'default' and self.last_last_text == 'export':
self.output.space_before_token = True
else:
self.print_newline()
elif self.last_type == 'TK_OPERATOR' or self.flags.last_text == '=':
# foo = function
self.output.space_before_token = True
elif not self.flags.multiline_frame and (self.is_expression(self.flags.mode) or self.is_array(self.flags.mode)):
# (function
pass
else:
self.print_newline()
if self.last_type in ['TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(current_token)
if current_token.type == 'TK_RESERVED' and current_token.text in ['function', 'get', 'set']:
self.print_token(current_token)
self.flags.last_word = current_token.text
return
prefix = 'NONE'
if self.last_type == 'TK_END_BLOCK':
if not (current_token.type == 'TK_RESERVED' and current_token.text in ['else', 'catch', 'finally']):
prefix = 'NEWLINE'
else:
if self.opts.brace_style in ['expand', 'end-expand'] or \
(self.opts.brace_style == 'none' and current_token.wanted_newline):
prefix = 'NEWLINE'
else:
prefix = 'SPACE'
self.output.space_before_token = True
elif self.last_type == 'TK_SEMICOLON' and self.flags.mode == MODE.BlockStatement:
# TODO: Should this be for STATEMENT as well?
prefix = 'NEWLINE'
elif self.last_type == 'TK_SEMICOLON' and self.is_expression(self.flags.mode):
prefix = 'SPACE'
elif self.last_type == 'TK_STRING':
prefix = 'NEWLINE'
elif self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD' or \
(self.flags.last_text == '*' and self.last_last_text == 'function'):
prefix = 'SPACE'
elif self.last_type == 'TK_START_BLOCK':
prefix = 'NEWLINE'
elif self.last_type == 'TK_END_EXPR':
self.output.space_before_token = True
prefix = 'NEWLINE'
if current_token.type == 'TK_RESERVED' and current_token.text in Tokenizer.line_starters and self.flags.last_text != ')':
if self.flags.last_text == 'else ' or self.flags.last_text == 'export':
prefix = 'SPACE'
else:
prefix = 'NEWLINE'
if current_token.type == 'TK_RESERVED' and current_token.text in ['else', 'catch', 'finally']:
if self.last_type != 'TK_END_BLOCK' \
or self.opts.brace_style == 'expand' \
or self.opts.brace_style == 'end-expand' \
or (self.opts.brace_style == 'none' and current_token.wanted_newline):
self.print_newline()
else:
self.output.trim(True)
# If we trimmed and there's something other than a close block before us
# put a newline back in. Handles '} // comment' scenario.
if self.output.current_line.last() != '}':
self.print_newline()
self.output.space_before_token = True
elif prefix == 'NEWLINE':
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
# no newline between return nnn
self.output.space_before_token = True
elif self.last_type != 'TK_END_EXPR':
if (self.last_type != 'TK_START_EXPR' or not (current_token.type == 'TK_RESERVED' and current_token.text in ['var', 'let', 'const'])) and self.flags.last_text != ':':
# no need to force newline on VAR -
# for (var x = 0...
if current_token.type == 'TK_RESERVED' and current_token.text == 'if' and self.flags.last_text == 'else':
self.output.space_before_token = True
else:
self.print_newline()
elif current_token.type == 'TK_RESERVED' and current_token.text in Tokenizer.line_starters and self.flags.last_text != ')':
self.print_newline()
elif self.flags.multiline_frame and self.is_array(self.flags.mode) and self.flags.last_text == ',' and self.last_last_text == '}':
self.print_newline() # }, in lists get a newline
elif prefix == 'SPACE':
self.output.space_before_token = True
self.print_token(current_token)
self.flags.last_word = current_token.text
if current_token.type == 'TK_RESERVED' and current_token.text == 'do':
self.flags.do_block = True
if current_token.type == 'TK_RESERVED' and current_token.text == 'if':
self.flags.if_block = True
def handle_semicolon(self, current_token):
if self.start_of_statement(current_token):
# The conditional starts the statement if appropriate.
# Semicolon can be the start (and end) of a statement
self.output.space_before_token = False
while self.flags.mode == MODE.Statement and not self.flags.if_block and not self.flags.do_block:
self.restore_mode()
self.print_token(current_token)
def handle_string(self, current_token):
if self.start_of_statement(current_token):
# The conditional starts the statement if appropriate.
# One difference - strings want at least a space before
self.output.space_before_token = True
elif self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD':
self.output.space_before_token = True
elif self.last_type in ['TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']:
if not self.start_of_object_property():
self.allow_wrap_or_preserved_newline(current_token)
else:
self.print_newline()
self.print_token(current_token)
def handle_equals(self, current_token):
if self.start_of_statement(current_token):
# The conditional starts the statement if appropriate.
pass
if self.flags.declaration_statement:
# just got an '=' in a var-line, different line breaking rules will apply
self.flags.declaration_assignment = True
self.output.space_before_token = True
self.print_token(current_token)
self.output.space_before_token = True
def handle_comma(self, current_token):
if self.flags.declaration_statement:
if self.is_expression(self.flags.parent.mode):
# do not break on comma, for ( var a = 1, b = 2
self.flags.declaration_assignment = False
self.print_token(current_token)
if self.flags.declaration_assignment:
self.flags.declaration_assignment = False
self.print_newline(preserve_statement_flags = True)
else:
self.output.space_before_token = True
# for comma-first, we want to allow a newline before the comma
# to turn into a newline after the comma, which we will fixup later
if self.opts.comma_first:
self.allow_wrap_or_preserved_newline(current_token)
return
self.print_token(current_token)
if self.flags.mode == MODE.ObjectLiteral \
or (self.flags.mode == MODE.Statement and self.flags.parent.mode == MODE.ObjectLiteral):
if self.flags.mode == MODE.Statement:
self.restore_mode()
self.print_newline()
else:
# EXPR or DO_BLOCK
self.output.space_before_token = True
# for comma-first, we want to allow a newline before the comma
# to turn into a newline after the comma, which we will fixup later
if self.opts.comma_first:
self.allow_wrap_or_preserved_newline(current_token)
def handle_operator(self, current_token):
if self.start_of_statement(current_token):
# The conditional starts the statement if appropriate.
pass
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
# return had a special handling in TK_WORD
self.output.space_before_token = True
self.print_token(current_token)
return
# hack for actionscript's import .*;
if current_token.text == '*' and self.last_type == 'TK_DOT':
self.print_token(current_token)
return
if current_token.text == ':' and self.flags.in_case:
self.flags.case_body = True
self.indent()
self.print_token(current_token)
self.print_newline()
self.flags.in_case = False
return
if current_token.text == '::':
# no spaces around the exotic namespacing syntax operator
self.print_token(current_token)
return
# Allow line wrapping between operators in an expression
if self.last_type == 'TK_OPERATOR':
self.allow_wrap_or_preserved_newline(current_token)
space_before = True
space_after = True
if current_token.text in ['--', '++', '!', '~'] \
or (current_token.text in ['+', '-'] \
and (self.last_type in ['TK_START_BLOCK', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR'] \
or self.flags.last_text in Tokenizer.line_starters or self.flags.last_text == ',')):
space_before = False
space_after = False
# http://www.ecma-international.org/ecma-262/5.1/#sec-7.9.1
# if there is a newline between -- or ++ and anything else we should preserve it.
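            # (Sketch of the reasoning: under automatic semicolon insertion,
            # 'i' <newline> '++' <newline> 'j' parses as 'i; ++j;', so collapsing
            # that newline could change the program's meaning.)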
if current_token.wanted_newline and (current_token.text == '--' or current_token.text == '++'):
self.print_newline(preserve_statement_flags = True)
if self.flags.last_text == ';' and self.is_expression(self.flags.mode):
# for (;; ++i)
# ^^
space_before = True
if self.last_type == 'TK_RESERVED':
space_before = True
elif self.last_type == 'TK_END_EXPR':
space_before = not (self.flags.last_text == ']' and current_token.text in ['--', '++'])
elif self.last_type == 'TK_OPERATOR':
# a++ + ++b
# a - -b
space_before = current_token.text in ['--', '-','++', '+'] and self.flags.last_text in ['--', '-','++', '+']
            # + and - are not unary when preceded by -- or ++ operator
# a-- + b
# a * +b
# a - -b
if current_token.text in ['-', '+'] and self.flags.last_text in ['--', '++']:
space_after = True
if self.flags.mode == MODE.BlockStatement and self.flags.last_text in ['{', ';']:
# { foo: --i }
# foo(): --bar
self.print_newline()
elif current_token.text == ':':
if self.flags.ternary_depth == 0:
# Colon is invalid javascript outside of ternary and object, but do our best to guess what was meant.
space_before = False
else:
self.flags.ternary_depth -= 1
elif current_token.text == '?':
self.flags.ternary_depth += 1
elif current_token.text == '*' and self.last_type == 'TK_RESERVED' and self.flags.last_text == 'function':
space_before = False
space_after = False
if space_before:
self.output.space_before_token = True
self.print_token(current_token)
if space_after:
self.output.space_before_token = True
def handle_block_comment(self, current_token):
if self.output.raw:
self.output.add_raw_token(current_token)
if current_token.directives and current_token.directives.get('preserve') == 'end':
# If we're testing the raw output behavior, do not allow a directive to turn it off.
if not self.opts.test_output_raw:
self.output.raw = False
return
if current_token.directives:
self.print_newline(preserve_statement_flags = True)
self.print_token(current_token)
if current_token.directives.get('preserve') == 'start':
self.output.raw = True
self.print_newline(preserve_statement_flags = True)
return
# inline block
if not self.acorn.newline.search(current_token.text) and not current_token.wanted_newline:
self.output.space_before_token = True
self.print_token(current_token)
self.output.space_before_token = True
return
lines = self.acorn.lineBreak.split(current_token.text)
javadoc = False
starless = False
last_indent = current_token.whitespace_before
last_indent_length = len(last_indent)
# block comment starts with a new line
self.print_newline(preserve_statement_flags = True)
if len(lines) > 1:
if not any(l for l in lines[1:] if ( l.strip() == '' or (l.lstrip())[0] != '*')):
javadoc = True
elif all(l.startswith(last_indent) or l.strip() == '' for l in lines[1:]):
starless = True
# first line always indented
self.print_token(current_token, lines[0])
for line in lines[1:]:
self.print_newline(preserve_statement_flags = True)
if javadoc:
# javadoc: reformat and re-indent
self.print_token(current_token, ' ' + line.lstrip())
elif starless and len(line) > last_indent_length:
# starless: re-indent non-empty content, avoiding trim
self.print_token(current_token, line[last_indent_length:])
else:
# normal comments output raw
self.output.add_token(line)
self.print_newline(preserve_statement_flags = True)
def handle_comment(self, current_token):
if current_token.wanted_newline:
self.print_newline(preserve_statement_flags = True)
if not current_token.wanted_newline:
self.output.trim(True)
self.output.space_before_token = True
self.print_token(current_token)
self.print_newline(preserve_statement_flags = True)
def handle_dot(self, current_token):
if self.start_of_statement(current_token):
# The conditional starts the statement if appropriate.
pass
if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
self.output.space_before_token = True
else:
# allow preserved newlines before dots in general
# force newlines on dots after close paren when break_chained - for bar().baz()
self.allow_wrap_or_preserved_newline(current_token,
self.flags.last_text == ')' and self.opts.break_chained_methods)
self.print_token(current_token)
def handle_unknown(self, current_token):
self.print_token(current_token)
if current_token.text[-1] == '\n':
self.print_newline()
def handle_eof(self, current_token):
# Unwind any open statements
while self.flags.mode == MODE.Statement:
self.restore_mode()
def mkdir_p(path):
try:
if path:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
# Using object instead of string to allow for later expansion of info about each line
class OutputLine:
def __init__(self, parent):
self.__parent = parent
self.__character_count = 0
self.__indent_count = -1
self.__items = []
self.__empty = True
def get_character_count(self):
return self.__character_count
def is_empty(self):
return self.__empty
def set_indent(self, level):
self.__character_count = self.__parent.baseIndentLength + level * self.__parent.indent_length
self.__indent_count = level;
def last(self):
if not self.is_empty():
return self.__items[-1]
else:
return None
def push(self, input):
self.__items.append(input)
self.__character_count += len(input)
self.__empty = False
def pop(self):
item = None
if not self.is_empty():
item = self.__items.pop()
self.__character_count -= len(item)
self.__empty = len(self.__items) == 0
return item
def remove_indent(self):
if self.__indent_count > 0:
self.__indent_count -= 1
self.__character_count -= self.__parent.indent_length
def trim(self):
while self.last() == ' ':
                item = self.__items.pop()
self.__character_count -= 1
self.__empty = len(self.__items) == 0
def toString(self):
result = ''
if not self.is_empty():
if self.__indent_count >= 0:
result = self.__parent.indent_cache[self.__indent_count]
result += ''.join(self.__items)
return result
class Output:
def __init__(self, indent_string, baseIndentString = ''):
self.indent_string = indent_string
self.baseIndentString = baseIndentString
self.indent_cache = [ baseIndentString ]
self.baseIndentLength = len(baseIndentString)
self.indent_length = len(indent_string)
self.raw = False
self.lines = []
self.previous_line = None
self.current_line = None
self.space_before_token = False
self.add_outputline()
def add_outputline(self):
self.previous_line = self.current_line
self.current_line = OutputLine(self)
self.lines.append(self.current_line)
def get_line_number(self):
return len(self.lines)
def add_new_line(self, force_newline):
if len(self.lines) == 1 and self.just_added_newline():
# no newline on start of file
return False
if force_newline or not self.just_added_newline():
if not self.raw:
self.add_outputline()
return True
return False
def get_code(self):
sweet_code = "\n".join(line.toString() for line in self.lines)
return re.sub('[\r\n\t ]+$', '', sweet_code)
def set_indent(self, level):
# Never indent your first output indent at the start of the file
if len(self.lines) > 1:
while level >= len(self.indent_cache):
self.indent_cache.append(self.indent_cache[-1] + self.indent_string)
self.current_line.set_indent(level)
return True
self.current_line.set_indent(0)
return False
def add_raw_token(self, token):
for _ in range(token.newlines):
self.add_outputline()
self.current_line.push(token.whitespace_before)
self.current_line.push(token.text)
self.space_before_token = False
def add_token(self, printable_token):
self.add_space_before_token()
self.current_line.push(printable_token)
def add_space_before_token(self):
if self.space_before_token and not self.just_added_newline():
self.current_line.push(' ')
self.space_before_token = False
def remove_redundant_indentation(self, frame):
# This implementation is effective but has some issues:
# - can cause line wrap to happen too soon due to indent removal
# after wrap points are calculated
# These issues are minor compared to ugly indentation.
if frame.multiline_frame or frame.mode == MODE.ForInitializer or frame.mode == MODE.Conditional:
return
# remove one indent from each line inside this section
index = frame.start_line_index
while index < len(self.lines):
self.lines[index].remove_indent()
index += 1
def trim(self, eat_newlines = False):
self.current_line.trim()
while eat_newlines and len(self.lines) > 1 and self.current_line.is_empty():
self.lines.pop()
self.current_line = self.lines[-1]
self.current_line.trim()
if len(self.lines) > 1:
self.previous_line = self.lines[-2]
else:
self.previous_line = None
def just_added_newline(self):
return self.current_line.is_empty()
def just_added_blankline(self):
if self.just_added_newline():
if len(self.lines) == 1:
return True
line = self.lines[-2]
return line.is_empty()
return False
class Tokenizer:
whitespace = ["\n", "\r", "\t", " "]
digit = re.compile('[0-9]')
digit_hex = re.compile('[0123456789abcdefABCDEF]')
punct = ('+ - * / % & ++ -- = += -= *= /= %= == === != !== > < >= <= >> << >>> >>>= >>= <<= && &= | || ! ~ , : ? ^ ^= |= :: =>' \
+ ' <?= <? ?> <%= <% %>').split(' ')
# Words which always should start on a new line
line_starters = 'continue,try,throw,return,var,let,const,if,switch,case,default,for,while,break,function,import,export'.split(',')
reserved_words = line_starters + ['do', 'in', 'else', 'get', 'set', 'new', 'catch', 'finally', 'typeof', 'yield', 'async', 'await']
def __init__ (self, input, opts, indent_string):
self.input = input
self.opts = opts
self.indent_string = indent_string
self.acorn = Acorn()
# /* ... */ comment ends with nearest */ or end of file
self.block_comment_pattern = re.compile('([\s\S]*?)((?:\*\/)|$)')
# comment ends just before nearest linefeed or end of file
self.comment_pattern = re.compile(self.acorn.six.u('([^\n\r\u2028\u2029]*)'))
self.directives_block_pattern = re.compile('\/\* beautify( \w+[:]\w+)+ \*\/')
self.directive_pattern = re.compile(' (\w+)[:](\w+)')
self.directives_end_ignore_pattern = re.compile('([\s\S]*?)((?:\/\*\sbeautify\signore:end\s\*\/)|$)')
self.template_pattern = re.compile('((<\?php|<\?=)[\s\S]*?\?>)|(<%[\s\S]*?%>)')
def tokenize(self):
self.in_html_comment = False
self.parser_pos = 0
self.tokens = []
next = None
last = None
open = None
open_stack = []
comments = []
while not (not last == None and last.type == 'TK_EOF'):
token_values = self.__tokenize_next()
next = Token(token_values[1], token_values[0], self.n_newlines, self.whitespace_before_token)
while next.type == 'TK_COMMENT' or next.type == 'TK_BLOCK_COMMENT' or next.type == 'TK_UNKNOWN':
if next.type == 'TK_BLOCK_COMMENT':
next.directives = token_values[2]
comments.append(next)
token_values = self.__tokenize_next()
next = Token(token_values[1], token_values[0], self.n_newlines, self.whitespace_before_token)
if len(comments) > 0:
next.comments_before = comments
comments = []
if next.type == 'TK_START_BLOCK' or next.type == 'TK_START_EXPR':
next.parent = last
open_stack.append(open)
open = next
elif (next.type == 'TK_END_BLOCK' or next.type == 'TK_END_EXPR') and \
(not open == None and ( \
(next.text == ']' and open.text == '[') or \
(next.text == ')' and open.text == '(') or \
(next.text == '}' and open.text == '{'))):
next.parent = open.parent
open = open_stack.pop()
self.tokens.append(next)
last = next
return self.tokens
def get_directives (self, text):
if not self.directives_block_pattern.match(text):
return None
directives = {}
directive_match = self.directive_pattern.search(text)
while directive_match:
directives[directive_match.group(1)] = directive_match.group(2)
directive_match = self.directive_pattern.search(text, directive_match.end())
return directives
def __tokenize_next(self):
whitespace_on_this_line = []
self.n_newlines = 0
self.whitespace_before_token = ''
if self.parser_pos >= len(self.input):
return '', 'TK_EOF'
if len(self.tokens) > 0:
last_token = self.tokens[-1]
else:
            # For the sake of tokenizing we can pretend that there was an open brace to start
last_token = Token('TK_START_BLOCK', '{')
c = self.input[self.parser_pos]
self.parser_pos += 1
while c in self.whitespace:
if self.acorn.newline.match(c):
# treat \r\n as one newline
if not (c == '\n' and self.input[self.parser_pos-2] == '\r'):
self.n_newlines += 1
whitespace_on_this_line = []
else:
whitespace_on_this_line.append(c)
if self.parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[self.parser_pos]
self.parser_pos += 1
if len(whitespace_on_this_line) != 0:
self.whitespace_before_token = ''.join(whitespace_on_this_line)
if self.digit.match(c):
allow_decimal = True
allow_e = True
local_digit = self.digit
if c == '0' and self.parser_pos < len(self.input) and re.match('[Xx]', self.input[self.parser_pos]):
# switch to hex number, no decimal or e, just hex digits
allow_decimal = False
allow_e = False
c += self.input[self.parser_pos]
self.parser_pos += 1
local_digit = self.digit_hex
else:
# we know this first loop will run. It keeps the logic simpler.
c = ''
self.parser_pos -= 1
# Add the digits
while self.parser_pos < len(self.input) and local_digit.match(self.input[self.parser_pos]):
c += self.input[self.parser_pos]
self.parser_pos += 1
if allow_decimal and self.parser_pos < len(self.input) and self.input[self.parser_pos] == '.':
c += self.input[self.parser_pos]
self.parser_pos += 1
allow_decimal = False
if allow_e and self.parser_pos < len(self.input) and re.match('[Ee]', self.input[self.parser_pos]):
c += self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos < len(self.input) and re.match('[+-]', self.input[self.parser_pos]):
c += self.input[self.parser_pos]
self.parser_pos += 1
allow_e = False
allow_decimal = False
return c, 'TK_WORD'
if self.acorn.isIdentifierStart(ord(self.input[self.parser_pos-1])):
if self.parser_pos < len(self.input):
while self.acorn.isIdentifierChar(ord(self.input[self.parser_pos])):
c = c + self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos == len(self.input):
break
if not (last_token.type == 'TK_DOT' \
or (last_token.type == 'TK_RESERVED' and last_token.text in ['set', 'get'])) \
and c in self.reserved_words:
if c == 'in': # in is an operator, need to hack
return c, 'TK_OPERATOR'
return c, 'TK_RESERVED'
return c, 'TK_WORD'
if c in '([':
return c, 'TK_START_EXPR'
if c in ')]':
return c, 'TK_END_EXPR'
if c == '{':
return c, 'TK_START_BLOCK'
if c == '}':
return c, 'TK_END_BLOCK'
if c == ';':
return c, 'TK_SEMICOLON'
if c == '/':
comment = ''
inline_comment = True
if self.input[self.parser_pos] == '*': # peek /* .. */ comment
self.parser_pos += 1
comment_match = self.block_comment_pattern.match(self.input, self.parser_pos)
comment = '/*' + comment_match.group(0)
self.parser_pos += len(comment_match.group(0))
directives = self.get_directives(comment)
if directives and directives.get('ignore') == 'start':
comment_match = self.directives_end_ignore_pattern.match(self.input, self.parser_pos)
comment += comment_match.group(0)
self.parser_pos += len(comment_match.group(0))
comment = re.sub(self.acorn.lineBreak, '\n', comment)
return comment, 'TK_BLOCK_COMMENT', directives
if self.input[self.parser_pos] == '/': # peek // comment
self.parser_pos += 1
comment_match = self.comment_pattern.match(self.input, self.parser_pos)
comment = '//' + comment_match.group(0)
self.parser_pos += len(comment_match.group(0));
return comment, 'TK_COMMENT'
if c == '`' or c == "'" or c == '"' or \
( \
(c == '/') or \
(self.opts.e4x and c == "<" and re.match('^<([-a-zA-Z:0-9_.]+|{[^{}]*}|!\[CDATA\[[\s\S]*?\]\])(\s+[-a-zA-Z:0-9_.]+\s*=\s*(\'[^\']*\'|"[^"]*"|{.*?}))*\s*(/?)\s*>', self.input[self.parser_pos - 1:])) \
) and ( \
(last_token.type == 'TK_RESERVED' and last_token.text in ['return', 'case', 'throw', 'else', 'do', 'typeof', 'yield']) or \
(last_token.type == 'TK_END_EXPR' and last_token.text == ')' and \
last_token.parent and last_token.parent.type == 'TK_RESERVED' and last_token.parent.text in ['if', 'while', 'for']) or \
(last_token.type in ['TK_COMMENT', 'TK_START_EXPR', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_OPERATOR', \
'TK_EQUALS', 'TK_EOF', 'TK_SEMICOLON', 'TK_COMMA'])):
sep = c
esc = False
esc1 = 0
esc2 = 0
resulting_string = c
in_char_class = False
if sep == '/':
# handle regexp
in_char_class = False
while self.parser_pos < len(self.input) and \
(esc or in_char_class or self.input[self.parser_pos] != sep) and \
not self.acorn.newline.match(self.input[self.parser_pos]):
resulting_string += self.input[self.parser_pos]
if not esc:
esc = self.input[self.parser_pos] == '\\'
if self.input[self.parser_pos] == '[':
in_char_class = True
elif self.input[self.parser_pos] == ']':
in_char_class = False
else:
esc = False
self.parser_pos += 1
elif self.opts.e4x and sep == '<':
# handle e4x xml literals
xmlRegExp = re.compile('<(\/?)([-a-zA-Z:0-9_.]+|{[^{}]*}|!\[CDATA\[[\s\S]*?\]\])(\s+[-a-zA-Z:0-9_.]+\s*=\s*(\'[^\']*\'|"[^"]*"|{.*?}))*\s*(/?)\s*>')
xmlStr = self.input[self.parser_pos - 1:]
match = xmlRegExp.match(xmlStr)
if match:
rootTag = match.group(2)
depth = 0
while (match):
isEndTag = match.group(1)
tagName = match.group(2)
isSingletonTag = (match.groups()[-1] != "") or (match.group(2)[0:8] == "![CDATA[")
if tagName == rootTag and not isSingletonTag:
if isEndTag:
depth -= 1
else:
depth += 1
if depth <= 0:
break
match = xmlRegExp.search(xmlStr, match.end())
if match:
xmlLength = match.end() # + len(match.group())
else:
xmlLength = len(xmlStr)
self.parser_pos += xmlLength - 1
xmlStr = re.sub(self.acorn.lineBreak, '\n', xmlStr[:xmlLength])
return xmlStr, 'TK_STRING'
else:
# handle string
while self.parser_pos < len(self.input) and \
(esc or (self.input[self.parser_pos] != sep and
(sep == '`' or not self.acorn.newline.match(self.input[self.parser_pos])))):
resulting_string += self.input[self.parser_pos]
# Handle \r\n linebreaks after escapes or in template strings
if self.input[self.parser_pos] == '\r' and self.parser_pos + 1 < len(self.input) and self.input[self.parser_pos + 1] == '\n':
self.parser_pos += 1
resulting_string += '\n'
if esc1 and esc1 >= esc2:
try:
esc1 = int(resulting_string[-esc2:], 16)
except Exception:
esc1 = False
if esc1 and esc1 >= 0x20 and esc1 <= 0x7e:
esc1 = chr(esc1)
resulting_string = resulting_string[:-2 - esc2]
if esc1 == sep or esc1 == '\\':
resulting_string += '\\'
resulting_string += esc1
esc1 = 0
if esc1:
esc1 += 1
elif not esc:
esc = self.input[self.parser_pos] == '\\'
else:
esc = False
if self.opts.unescape_strings:
if self.input[self.parser_pos] == 'x':
esc1 += 1
esc2 = 2
elif self.input[self.parser_pos] == 'u':
esc1 += 1
esc2 = 4
self.parser_pos += 1
if self.parser_pos < len(self.input) and self.input[self.parser_pos] == sep:
resulting_string += sep
self.parser_pos += 1
if sep == '/':
# regexps may have modifiers /regexp/MOD, so fetch those too
# Only [gim] are valid, but if the user puts in garbage, do what we can to take it.
while self.parser_pos < len(self.input) and self.acorn.isIdentifierStart(ord(self.input[self.parser_pos])):
resulting_string += self.input[self.parser_pos]
self.parser_pos += 1
resulting_string = re.sub(self.acorn.lineBreak, '\n', resulting_string)
return resulting_string, 'TK_STRING'
if c == '#':
# she-bang
if len(self.tokens) == 0 and len(self.input) > self.parser_pos and self.input[self.parser_pos] == '!':
resulting_string = c
while self.parser_pos < len(self.input) and c != '\n':
c = self.input[self.parser_pos]
resulting_string += c
self.parser_pos += 1
return resulting_string.strip() + '\n', 'TK_UNKNOWN'
# Spidermonkey-specific sharp variables for circular references
# https://developer.mozilla.org/En/Sharp_variables_in_JavaScript
# http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935
sharp = '#'
if self.parser_pos < len(self.input) and self.digit.match(self.input[self.parser_pos]):
while True:
c = self.input[self.parser_pos]
sharp += c
self.parser_pos += 1
if self.parser_pos >= len(self.input) or c == '#' or c == '=':
break
if c == '#' or self.parser_pos >= len(self.input):
pass
elif self.input[self.parser_pos] == '[' and self.input[self.parser_pos + 1] == ']':
sharp += '[]'
self.parser_pos += 2
elif self.input[self.parser_pos] == '{' and self.input[self.parser_pos + 1] == '}':
sharp += '{}'
self.parser_pos += 2
return sharp, 'TK_WORD'
if c == '<' and self.input[self.parser_pos] in ['?', '%']:
template_match = self.template_pattern.match(self.input, self.parser_pos - 1);
if template_match:
c = template_match.group(0)
self.parser_pos += len(c) - 1
c = re.sub(self.acorn.lineBreak, '\n', c)
return c, 'TK_STRING'
if c == '<' and self.input[self.parser_pos - 1 : self.parser_pos + 3] == '<!--':
self.parser_pos += 3
c = '<!--'
while self.parser_pos < len(self.input) and not self.acorn.newline.match(self.input[self.parser_pos]):
c += self.input[self.parser_pos]
self.parser_pos += 1
self.in_html_comment = True
return c, 'TK_COMMENT'
if c == '-' and self.in_html_comment and self.input[self.parser_pos - 1 : self.parser_pos + 2] == '-->':
self.in_html_comment = False
self.parser_pos += 2
return '-->', 'TK_COMMENT'
if c == '.':
return c, 'TK_DOT'
if c in self.punct:
while self.parser_pos < len(self.input) and c + self.input[self.parser_pos] in self.punct:
c += self.input[self.parser_pos]
self.parser_pos += 1
if self.parser_pos >= len(self.input):
break
if c == ',':
return c, 'TK_COMMA'
if c == '=':
return c, 'TK_EQUALS'
return c, 'TK_OPERATOR'
return c, 'TK_UNKNOWN'
def isFileDifferent(filepath, expected):
try:
return (''.join(open(filepath).readlines()) != expected)
except:
return True
def main():
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv, "s:c:e:o:rdEPjabkil:xhtfvXnCw:",
            ['indent-size=','indent-char=','eol=', 'outfile=', 'replace', 'disable-preserve-newlines',
'space-in-paren', 'space-in-empty-paren', 'jslint-happy', 'space-after-anon-function',
'brace-style=', 'keep-array-indentation', 'indent-level=', 'unescape-strings', 'help',
'usage', 'stdin', 'eval-code', 'indent-with-tabs', 'keep-function-indentation', 'version',
            'e4x', 'end-with-newline','comma-first','wrap-line-length='])
except getopt.GetoptError as ex:
print(ex, file=sys.stderr)
return usage(sys.stderr)
js_options = default_options()
file = None
outfile = 'stdout'
replace = False
if len(args) == 1:
file = args[0]
for opt, arg in opts:
if opt in ('--keep-array-indentation', '-k'):
js_options.keep_array_indentation = True
if opt in ('--keep-function-indentation','-f'):
js_options.keep_function_indentation = True
elif opt in ('--outfile', '-o'):
outfile = arg
elif opt in ('--replace', '-r'):
replace = True
elif opt in ('--indent-size', '-s'):
js_options.indent_size = int(arg)
elif opt in ('--indent-char', '-c'):
js_options.indent_char = arg
elif opt in ('--eol', '-e'):
js_options.eol = arg
elif opt in ('--indent-with-tabs', '-t'):
js_options.indent_with_tabs = True
elif opt in ('--disable-preserve-newlines', '-d'):
js_options.preserve_newlines = False
elif opt in ('--space-in-paren', '-P'):
js_options.space_in_paren = True
elif opt in ('--space-in-empty-paren', '-E'):
js_options.space_in_empty_paren = True
elif opt in ('--jslint-happy', '-j'):
js_options.jslint_happy = True
        elif opt in ('--space-after-anon-function', '-a'):
js_options.space_after_anon_function = True
        elif opt in ('--eval-code',):
js_options.eval_code = True
elif opt in ('--brace-style', '-b'):
js_options.brace_style = arg
elif opt in ('--unescape-strings', '-x'):
js_options.unescape_strings = True
elif opt in ('--e4x', '-X'):
js_options.e4x = True
elif opt in ('--end-with-newline', '-n'):
js_options.end_with_newline = True
elif opt in ('--comma-first', '-C'):
js_options.comma_first = True
        elif opt in ('--wrap-line-length', '-w'):
js_options.wrap_line_length = int(arg)
elif opt in ('--stdin', '-i'):
file = '-'
elif opt in ('--version', '-v'):
return print(__version__)
elif opt in ('--help', '--usage', '-h'):
return usage()
if not file:
print("Must define at least one file.", file=sys.stderr)
return usage(sys.stderr)
else:
try:
if outfile == 'stdout' and replace and not file == '-':
outfile = file
pretty = beautify_file(file, js_options)
if outfile == 'stdout':
sys.stdout.write(pretty)
else:
if isFileDifferent(outfile, pretty):
mkdir_p(os.path.dirname(outfile))
with open(outfile, 'w') as f:
f.write(pretty)
except Exception as ex:
print(ex, file=sys.stderr)
return 1
# Success
return 0
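# Illustrative command-line sketch (the entry-point name and paths are
# examples, not taken from the original source): the options parsed above
# support invocations such as
#
#   jsbeautifier --replace --indent-size=2 path/to/script.js
#   cat script.js | jsbeautifier --stdin --outfile=pretty.js
#
# With --replace the beautified text overwrites the input file; otherwise it is
# written to stdout or to the file named by --outfile.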
| mit |
smurfix/DaBroker | dabroker/base/transport/__init__.py | 1 | 4226 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of DaBroker, a distributed data access manager.
##
## DaBroker is Copyright © 2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
from gevent import GreenletExit
from dabroker.util.thread import prep_spawned
import logging
logger = logging.getLogger("dabroker.base.transport")
class ConnectionError(RuntimeError):
pass
class BaseCallbacks(object):
def recv(self,msg):
"""Incoming message from the other side. NOT used for receiving replies!"""
raise NotImplementedError("You need to override {}.recv()".format(self.__class__.__name__))
def send(self,msg):
"""Outgoing message to the other side. NOT used for sending replies!"""
raise NotImplementedError("You need to override {}.send()".format(self.__class__.__name__))
def ended(self,err=None):
"""Called on receiver error. Do not reconnect here!"""
pass
def reconnect(self,err=None):
"""Called after a closed connection has been cleaned up"""
pass
def register_codec(self,codec):
raise NotImplementedError("You need to override {}.register_codec()".format(self.__class__.__name__))
class RelayedError(Exception):
"""An encapsulation for a server error (with traceback)"""
def __init__(self,err,tb):
self.err = str(err)
self.tb = tb
def __repr__(self):
return "{}({})".format(self.__class__.__name__,self.err)
def __str__(self):
r = repr(self)
if self.tb is None: return r
return r+"\n"+self.tb
class BaseTransport(object):
_job = None
defaults = {}
connection = None
last_msgid = 0
def __init__(self,callbacks, cfg={}):
self.cfg = self.defaults.copy()
self.cfg.update(cfg)
self.callbacks = callbacks
self.trace = cfg.get('trace',0)
def connect(self, purge=False):
"""Connect. (Synchronously.)
Do not override!
Override .connect1() (setup) and .connect2() (initial tasks)"""
assert self.callbacks is not None
assert self.connection is None
self.connect1()
if purge:
self.purge_all()
self.connect2()
def connect1(self):
"""Set up a connection.
Call super() before your code."""
if self._job is not None:
raise RuntimeError("Already connected")
logger.debug("connecting: %r",self)
def connect2(self):
"""Add initial tasks after a connection has been established.
Call super() after your code."""
assert self._job is None
self._job = self._run_job()
self._job.start()
def disconnect(self):
"""Sever the connection; do not auto-reconnect."""
logger.debug("disconnecting: %r",self)
j,self._job = self._job,None
if j:
j.stop()
def disconnected(self, err=None):
"""Clear connection objects.
This will be called by the reader task as it exits.
Do not reconnect from here; do that in your .reconnect"""
logger.debug("disconnected: %r",self)
def purge_all(self):
"""
Clear this transport's message queue.
This should only be called when client and server are known to
be idle AND when you suspect an unprocessable message might
clog the queue.
"""
pass
def send(self,msg):
raise NotImplementedError("You need to override {}.send()".format(self.__class__.__name__))
def run(self):
raise NotImplementedError("You need to override {}.run()".format(self.__class__.__name__))
@prep_spawned
def _run_job(self):
try:
logger.debug("Running receiver loop: %r",self)
self.run()
except GreenletExit:
err=None
logger.debug("Receiver loop ends: %r",self)
self.callbacks.ended(None)
except BaseException as e:
err = e
logger.exception("Receiver loop error: %r",self)
self.callbacks.ended(e)
else:
err=None
logger.debug("Receiver loop ends: %r",self)
self.callbacks.ended(None)
finally:
self.disconnected()
if self._job is not None:
self._job = None
self.callbacks.reconnect(err)
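# Illustrative subclass sketch (not part of the original module; the names are
# made up): a concrete transport only needs to set up its channel in
# connect1()/connect2() and provide send() plus the blocking receive loop run().
#
#   class LoopbackTransport(BaseTransport):
#       def connect1(self):
#           super(LoopbackTransport, self).connect1()
#           self.connection = object()      # stand-in for a real channel
#       def send(self, msg):
#           self.callbacks.recv(msg)        # hand outgoing messages straight back
#       def run(self):
#           pass                            # a real transport blocks here, reading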
| gpl-3.0 |
GrandmasterK/XScheduler | venv/lib/python2.7/site-packages/flask/templating.py | 783 | 4707 | # -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
"""Default template context processor. Injects `request`,
`session` and `g`.
"""
reqctx = _request_ctx_stack.top
appctx = _app_ctx_stack.top
rv = {}
if appctx is not None:
rv['g'] = appctx.g
if reqctx is not None:
rv['request'] = reqctx.request
rv['session'] = reqctx.session
return rv
class Environment(BaseEnvironment):
"""Works like a regular Jinja2 environment but has some additional
knowledge of how Flask's blueprint works so that it can prepend the
name of the blueprint to referenced templates if necessary.
"""
def __init__(self, app, **options):
if 'loader' not in options:
options['loader'] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
class DispatchingJinjaLoader(BaseLoader):
"""A loader that looks for templates in the application and all
the blueprint folders.
"""
def __init__(self, app):
self.app = app
def get_source(self, environment, template):
for loader, local_name in self._iter_loaders(template):
try:
return loader.get_source(environment, local_name)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
def _iter_loaders(self, template):
loader = self.app.jinja_loader
if loader is not None:
yield loader, template
# old style module based loaders in case we are dealing with a
# blueprint that is an old style module
try:
module, local_name = posixpath.normpath(template).split('/', 1)
blueprint = self.app.blueprints[module]
if blueprint_is_module(blueprint):
loader = blueprint.jinja_loader
if loader is not None:
yield loader, local_name
except (ValueError, KeyError):
pass
for blueprint in itervalues(self.app.blueprints):
if blueprint_is_module(blueprint):
continue
loader = blueprint.jinja_loader
if loader is not None:
yield loader, template
def list_templates(self):
result = set()
loader = self.app.jinja_loader
if loader is not None:
result.update(loader.list_templates())
for name, blueprint in iteritems(self.app.blueprints):
loader = blueprint.jinja_loader
if loader is not None:
for template in loader.list_templates():
prefix = ''
if blueprint_is_module(blueprint):
prefix = name + '/'
result.add(prefix + template)
return list(result)
def _render(template, context, app):
"""Renders the template and fires the signal"""
rv = template.render(context)
template_rendered.send(app, template=template, context=context)
return rv
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
rendered, or an iterable with template names
the first one existing will be rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
context, ctx.app)
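# Illustrative usage sketch (route and template names are examples): inside an
# application/request context the loader above resolves application and
# blueprint template folders transparently, e.g.
#
#   @app.route('/hello/<name>')
#   def hello(name):
#       return render_template('hello.html', name=name)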
def render_template_string(source, **context):
"""Renders a template from the given template source string
with the given context.
:param source: the sourcecode of the template to be
rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.from_string(source),
context, ctx.app)
| mit |
Tennyson53/SUR | magnum/tests/unit/common/cert_manager/test_local.py | 3 | 5314 | # Copyright 2014 Rackspace US, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from magnum.common.cert_manager import cert_manager
from magnum.common.cert_manager import local_cert_manager
from magnum.tests import base
class TestLocalCert(base.BaseTestCase):
def setUp(self):
self.certificate = "My Certificate"
self.intermediates = "My Intermediates"
self.private_key = "My Private Key"
self.private_key_passphrase = "My Private Key Passphrase"
super(TestLocalCert, self).setUp()
def test_local_cert(self):
# Create a cert
cert = local_cert_manager.Cert(
certificate=self.certificate,
intermediates=self.intermediates,
private_key=self.private_key,
private_key_passphrase=self.private_key_passphrase
)
# Validate the cert functions
self.assertEqual(cert.get_certificate(), self.certificate)
self.assertEqual(cert.get_intermediates(), self.intermediates)
self.assertEqual(cert.get_private_key(), self.private_key)
self.assertEqual(cert.get_private_key_passphrase(),
self.private_key_passphrase)
class TestLocalManager(base.BaseTestCase):
def setUp(self):
self.certificate = "My Certificate"
self.intermediates = "My Intermediates"
self.private_key = "My Private Key"
self.private_key_passphrase = "My Private Key Passphrase"
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="certificates", storage_path="/tmp/")
super(TestLocalManager, self).setUp()
def _store_cert(self):
file_mock = mock.mock_open()
# Attempt to store the cert
with mock.patch('__builtin__.open', file_mock, create=True):
cert_id = local_cert_manager.CertManager.store_cert(
certificate=self.certificate,
intermediates=self.intermediates,
private_key=self.private_key,
private_key_passphrase=self.private_key_passphrase
)
# Check that something came back
self.assertIsNotNone(cert_id)
# Verify the correct files were opened
file_mock.assert_has_calls([
mock.call(os.path.join('/tmp/{0}.crt'.format(cert_id)), 'w'),
mock.call(os.path.join('/tmp/{0}.key'.format(cert_id)), 'w'),
mock.call(os.path.join('/tmp/{0}.int'.format(cert_id)), 'w'),
mock.call(os.path.join('/tmp/{0}.pass'.format(cert_id)), 'w')
], any_order=True)
# Verify the writes were made
file_mock().write.assert_has_calls([
mock.call(self.certificate),
mock.call(self.intermediates),
mock.call(self.private_key),
mock.call(self.private_key_passphrase)
], any_order=True)
return cert_id
def _get_cert(self, cert_id):
file_mock = mock.mock_open()
# Attempt to retrieve the cert
with mock.patch('__builtin__.open', file_mock, create=True):
data = local_cert_manager.CertManager.get_cert(cert_id)
# Verify the correct files were opened
file_mock.assert_has_calls([
mock.call(os.path.join('/tmp/{0}.crt'.format(cert_id)), 'r'),
mock.call(os.path.join('/tmp/{0}.key'.format(cert_id)), 'r'),
mock.call(os.path.join('/tmp/{0}.int'.format(cert_id)), 'r'),
mock.call(os.path.join('/tmp/{0}.pass'.format(cert_id)), 'r')
], any_order=True)
# The returned data should be a Cert object
self.assertIsInstance(data, cert_manager.Cert)
return data
def _delete_cert(self, cert_id):
remove_mock = mock.Mock()
# Delete the cert
with mock.patch('os.remove', remove_mock):
local_cert_manager.CertManager.delete_cert(cert_id)
# Verify the correct files were removed
remove_mock.assert_has_calls([
mock.call(os.path.join('/tmp/{0}.crt'.format(cert_id))),
mock.call(os.path.join('/tmp/{0}.key'.format(cert_id))),
mock.call(os.path.join('/tmp/{0}.int'.format(cert_id))),
mock.call(os.path.join('/tmp/{0}.pass'.format(cert_id)))
], any_order=True)
def test_store_cert(self):
self._store_cert()
def test_get_cert(self):
# Store a cert
cert_id = self._store_cert()
# Get the cert
self._get_cert(cert_id)
def test_delete_cert(self):
# Store a cert
cert_id = self._store_cert()
# Verify the cert exists
self._get_cert(cert_id)
# Delete the cert
self._delete_cert(cert_id)
| apache-2.0 |
iocast/vectorformats | vectorformats/formats/dxf.py | 2 | 1362 | from dxfwrite import DXFEngine as dxf
from .format import Format
class DXF(Format):
_drawing = None
def encode(self, features, **kwargs):
tmpFile = kwargs["tmpFile"]
if len(features) > 0:
self._drawing = dxf.drawing(tmpFile)
self._drawing.add_layer("featureserver")
for feature in features:
self.encode_feature(feature)
self._drawing.save()
return self._drawing
def encode_feature(self, feature):
if feature["geometry"]["type"] == "Point":
self._drawing.add(dxf.point(point=(feature["geometry"]["coordinates"][0],feature["geometry"]["coordinates"][1])))
elif feature["geometry"]["type"] == "LineString":
polyline= dxf.polyline()
coords = feature["geometry"]["coordinates"]
for coord in coords:
polyline.add_vertex((coord[0], coord[1]))
self._drawing.add(polyline)
elif feature["geometry"]["type"] == "Polygon":
polygon = dxf.polyline()
coords = feature["geometry"]["coordinates"]
for coord in coords:
for point in coord:
polygon.add_vertex((point[0], point[1]))
polygon.close()
self._drawing.add(polygon)
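# Illustrative input sketch (coordinates and path are examples): encode()
# expects GeoJSON-style feature dicts plus the target path as tmpFile, e.g.
#
#   DXF().encode([{'geometry': {'type': 'Point', 'coordinates': [7.44, 46.95]}}],
#                tmpFile='/tmp/features.dxf')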
| mit |
joelsmith/openshift-tools | ansible/roles/lib_openshift_3.2/build/src/oc_user.py | 13 | 4702 | # vim: expandtab:tabstop=4:shiftwidth=4
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class OCUser(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'users'
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
groups=None,
verbose=False):
''' Constructor for OCVolume '''
super(OCUser, self).__init__(config.namespace, config.kubeconfig)
self.config = config
self.groups = groups
self._user = None
@property
def user(self):
''' property function service'''
if not self._user:
self.get()
return self._user
@user.setter
def user(self, data):
''' setter function for yedit var '''
self._user = data
def exists(self):
''' return whether a user exists '''
if self.user:
return True
return False
def get(self):
'''return user information '''
result = self._get(self.kind, self.config.username)
if result['returncode'] == 0:
self.user = User(content=result['results'][0])
elif 'users \"%s\" not found' % self.config.username in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.config.username)
def create_group_entries(self):
''' make entries for user to the provided group list '''
        if self.groups:
for group in self.groups:
cmd = ['groups', 'add-users', group, self.config.username]
rval = self.openshift_cmd(cmd, oadm=True)
if rval['returncode'] != 0:
return rval
return rval
return {'returncode': 0}
def create(self):
'''create the object'''
rval = self.create_group_entries()
if rval['returncode'] != 0:
return rval
return self._create_from_content(self.config.username, self.config.data)
def group_update(self):
''' update group membership '''
rval = {'returncode': 0}
cmd = ['get', 'groups', '-n', self.namespace, '-o', 'json']
all_groups = self.openshift_cmd(cmd, output=True)
for group in all_groups['results']['items']:
# If we're supposed to be in this group
if group['metadata']['name'] in self.groups \
                and (group['users'] is None or self.config.username not in group['users']):
cmd = ['groups', 'add-users', group['metadata']['name'],
self.config.username]
rval = self.openshift_cmd(cmd, oadm=True)
if rval['returncode'] != 0:
return rval
# else if we're in the group, but aren't supposed to be
            elif group['users'] is not None and self.config.username in group['users'] \
and group['metadata']['name'] not in self.groups:
cmd = ['groups', 'remove-users', group['metadata']['name'],
self.config.username]
rval = self.openshift_cmd(cmd, oadm=True)
if rval['returncode'] != 0:
return rval
return rval
def update(self):
'''update the object'''
rval = self.group_update()
if rval['returncode'] != 0:
return rval
# need to update the user's info
return self._replace_content(self.kind, self.config.username, self.config.data, force=True)
def needs_group_update(self):
''' check if there are group membership changes '''
cmd = ['get', 'groups', '-n', self.namespace, '-o', 'json']
all_groups = self.openshift_cmd(cmd, output=True)
for group in all_groups['results']['items']:
# If we're supposed to be in this group
if group['metadata']['name'] in self.groups \
                and (group['users'] is None or self.config.username not in group['users']):
return True
# else if we're in the group, but aren't supposed to be
            elif group['users'] is not None and self.config.username in group['users'] \
and group['metadata']['name'] not in self.groups:
return True
return False
def needs_update(self):
''' verify an update is needed '''
skip = []
if self.needs_group_update() == True:
return True
return not Utils.check_def_equal(self.config.data, self.user.yaml_dict, skip_keys=skip, debug=True)
| apache-2.0 |
vaidap/zulip | zerver/webhooks/slack/view.py | 3 | 1608 | from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import check_send_message, create_stream_if_needed
from zerver.lib.response import json_success, json_error
from zerver.lib.validator import check_string, check_int
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile
ZULIP_MESSAGE_TEMPLATE = u"**{message_sender}**: `{text}`"
VALID_OPTIONS = {'SHOULD_NOT_BE_MAPPED': '0', 'SHOULD_BE_MAPPED': '1'}
@api_key_only_webhook_view('Slack')
@has_request_variables
def api_slack_webhook(request, user_profile,
user_name=REQ(),
text=REQ(),
channel_name=REQ(),
stream=REQ(default='slack'),
channels_map_to_topics=REQ(default='1')):
# type: (HttpRequest, UserProfile, str, str, str, str, str) -> HttpResponse
if channels_map_to_topics not in list(VALID_OPTIONS.values()):
return json_error(_('Error: channels_map_to_topics parameter other than 0 or 1'))
if channels_map_to_topics == VALID_OPTIONS['SHOULD_BE_MAPPED']:
subject = "channel: {}".format(channel_name)
else:
stream = channel_name
subject = _("Message from Slack")
content = ZULIP_MESSAGE_TEMPLATE.format(message_sender=user_name, text=text)
check_send_message(user_profile, request.client, "stream", [stream], subject, content)
return json_success()
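# Illustrative outcome sketch (example values, not from the original source):
# with user_name='alice', channel_name='general', text='hi there' and
# channels_map_to_topics='1', the message goes to the stream named by the
# `stream` parameter (default 'slack') under topic 'channel: general' with
# content:
#
#   **alice**: `hi there`
#
# With channels_map_to_topics='0' it goes to a stream named 'general' under
# the topic 'Message from Slack' instead.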
| apache-2.0 |
tbekolay/neurotools | examples/single_neuron/CRF_neuron_vs_signal.py | 3 | 3070 | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
CRF_neuron_vs_signal.py
Testing the mean firing rate of a fiber for different signal strengths.
Prints to a figure the mean firing rate for the output (ON and OFF) as a function
of the different parameter values. It's similar to a CRF function.
Results illustrate that
- the higher the value the more the neuron spikes (wouah!),
- that this follows a ramp-type of function
- and that noise "smoothes" the transition in theinput/output function.
TODO: do a better plot as in benchmark_neuron_vs_noise.py
$Id: CRF_neuron_vs_signal.py 362 2008-12-08 17:35:59Z LaurentPerrinet $
"""
import os, sys, numpy, pylab, shelve
from NeuroTools.parameters import *
# this is not mandatory, just an "easy_install progressbar" away;
# otherwise remove the three corresponding lines in this code...
import progressbar # see http://projects.scipy.org/pipermail/scipy-dev/2008-January/008200.html
N_exp_snr = 20
N_exp_noise = 9
ps = ParameterSpace({
'snr' : ParameterRange(list(numpy.linspace(-1.,4.,N_exp_snr))),
'noise_std' : ParameterRange(list(10.**(numpy.linspace(-.50,1.,N_exp_noise))))})
name = sys.argv[0].split('.')[0] # name of the current script without the '.py' part
results = shelve.open('results/mat-' + name)
try:
CRF = results['CRF']
except:
# calculates the dimension of the parameter space
results_dim, results_label = ps.parameter_space_dimension_labels()
# creates results array with size of parameter space dimension
import simple_single_neuron as model
myFibers = model.FiberChannel()
CRF = numpy.empty(results_dim)
pbar=progressbar.ProgressBar(widgets=[name, " ", progressbar.Percentage(), ' ',
progressbar.Bar(), ' ', progressbar.ETA()], maxval=numpy.prod(results_dim))
for i_exp,experiment in enumerate(ps.iter_inner()):
params = myFibers.params
params.update(experiment) # updates what changed in the dictionary
# simulate the experiment and get its data
data = myFibers.run(params,verbose=False)
# calculating the index in the parameter space
index = ps.parameter_space_index(experiment)
# put the data at the right position in the results array
CRF[index] = data.mean_rate()#
pbar.update(i_exp)
results['CRF'] = CRF
pbar.finish()
results.close()
#numpy.array(p.noise_std._values),numpy.array(p.snr._values),
#pylab.plot(ps.snr._values,CRF.transpose()) #color = (sin(2*pi*noise_list)**2,cos(2*pi*noise_list)**2,1))
for i_noise, noise in enumerate(ps.noise_std._values):
pylab.plot(ps.snr._values,CRF[i_noise,:], label='noise = %5.3f' % noise)
#pylab.yticks(p.noise_std._values[:2:])
pylab.ylabel('Firing Rate (Hz/neuron)')
#pylab.xticks(p.snr._values[:2:])
pylab.xlabel('Signal')
pylab.legend(loc = 'lower right')
pylab.axis([numpy.min(ps.snr._values), numpy.max(ps.snr._values), 0.0, numpy.max(CRF[:])])
if 0:
pylab.show()
else:
pylab.savefig('results/fig-' + name + '.pdf')
pylab.savefig('results/fig-' + name + '.png')
| gpl-2.0 |
chrisjaquet/FreeCAD | src/Mod/Path/PathScripts/nc/dynapath.py | 30 | 1067 | import nc
import iso
import math
import datetime
import time
from format import Format
now = datetime.datetime.now()
class Creator(iso.Creator):
def __init__(self):
iso.Creator.__init__(self)
self.output_tool_definitions = False
self.m_codes_on_their_own_line = True
self.output_g98_and_g99 = False
#self.fmt = Format(dp_wanted = False, add_trailing_zeros = True, add_plus = True)
#def SPACE_STR(self): return ' '
def PROGRAM(self): return None
def RETRACT(self, height): return('R' + (self.fmt.string(height)))
def PECK_DEPTH(self, depth): return('O' + (self.fmt.string(depth)))
def program_begin(self, id, name=''):
self.write('(' + name + ')\n')
def imperial(self):
#self.g_list.append(self.IMPERIAL())
self.fmt.number_of_decimal_places = 4
def metric(self):
#self.g_list.append(self.METRIC())
self.fmt.number_of_decimal_places = 3
def comment(self, text):
pass
nc.creator = Creator()
| lgpl-2.1 |
MattFaus/CrowdTube-Connector | youtube.py | 1 | 6824 | import os
import urlparse
from lib import gdata
import lib.gdata.youtube.client
import secrets
GDATA_API_CLIENT_ID = 'CrowdTube-Connector'
class YouTubeCaptionEditor(object):
def __init__(self, google_email, google_password, youtube_username):
self.youtube_username = youtube_username
self.youtube_client = lib.gdata.youtube.client.YouTubeClient()
# We shouldn't need this auth_token, but we'll keep it around
self.auth_token = self.youtube_client.client_login(
google_email, google_password, GDATA_API_CLIENT_ID)
# A dictionary of youtube_id and YouTubeVideo objects
self.videos = {}
def get_videos(self):
# Format copied from lib.gdata.youtube.client.py
feed_uri = '%s%s/%s' % (lib.gdata.youtube.client.YOUTUBE_USER_FEED_URI,
self.youtube_username, 'uploads')
all_videos = self.youtube_client.get_videos(uri=feed_uri)
for video in all_videos.entry:
new_video = YouTubeVideo(video, self.youtube_client)
self.videos[new_video.video_id] = new_video
def get_video(self, video_id):
video_entry = self.youtube_client.get_video_entry(video_id=video_id)
return YouTubeVideo(video_entry, self.youtube_client)
def delete_track(self, video_id, track_id):
"""Deletes an existing track."""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
response = self.youtube_client.delete_track(video_id, track_id,
client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key)
# http://docs.python.org/release/2.2.3/lib/httpresponse-objects.html
if response.status != 200:
print response.status, response.msg
return False
return True
def add_track(self, video_id, title, language, track_content):
"""Adds a caption track.
If a track with the same title already exists, this will silently fail.
"""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
track_content = track_content.encode('utf-8')
response = self.youtube_client.create_track(video_id, title, language,
track_content, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key, fmt='sub')
# Returns a TrackEntry object
return response
def update_track(self, video_id, track_id, track_content):
"""Adds a caption track."""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
track_content = track_content.encode('utf-8')
response = self.youtube_client.update_track(video_id, track_id,
track_content, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key, fmt='sub')
# Returns a TrackEntry object
return response
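# Illustrative usage sketch (credentials and the video id are placeholders):
# copy a video's machine-generated captions into a manually editable track.
#
#   editor = YouTubeCaptionEditor(google_email, google_password, 'my_channel')
#   video = editor.get_video('SNrEiiJwD4Y')
#   track = video.get_machine_generated_track()
#   if track:
#       editor.add_track(video.video_id, 'Editable copy', 'en',
#                        track.track_content.decode('utf-8'))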
# TODO(mattfaus): Suck these two classes into the YouTubeCaptionEditor, above
# make the YouTubeCaptionEditor behave more like a full-fledged youtube client
# Shouldn't have to pass the youtube_client object around to the sub-classes
# No need to have dictionaries where an array would do just fine (YouTubeVideo.caption_tracks)
class YouTubeVideo(object):
def __init__(self, video_entry, youtube_client=None):
self.youtube_client = youtube_client
# tag:youtube.com,2008:video:SNrEiiJwD4Y
id_parts = video_entry.GetId().split(':')
self.video_id = id_parts[id_parts.index('video') + 1]
self.title = video_entry.title.text
caption_link = video_entry.get_link(
'http://gdata.youtube.com/schemas/2007#video.captionTracks')
self.caption_feed = caption_link.href
# TODO(mattfaus): Make this less ugly
has_entries = [
a.value for a in caption_link.GetAttributes()
if '{http://gdata.youtube.com/schemas/2007}hasEntries' == a._qname]
has_entries = has_entries[0] == 'true'
self.has_entries = has_entries
self.caption_tracks = {}
def get_caption_tracks(self, download=False):
# Don't check self.has_entries. It may be False when only a
# machine-generated caption track exists.
if not self.youtube_client:
raise ValueError('No youtube client available!')
# STOPSHIP(mattfaus): get_caption_feed() only returns the first 24 caption tracks
# so we must iterate to read more
# TODO(mattfaus): Filter this by language with the 'lr' attribute
all_captions = self.youtube_client.get_caption_feed(self.caption_feed)
for caption_entry in all_captions.entry:
new_track = YouTubeCaptionTrack(caption_entry, self.youtube_client)
self.caption_tracks[new_track.track_source] = new_track
if download:
new_track.download_track()
def get_machine_generated_track(self):
self.get_caption_tracks()
for src, caption_track in self.caption_tracks.iteritems():
print src, caption_track
if caption_track.machine_generated:
caption_track.download_track()
return caption_track
class YouTubeCaptionTrack(object):
def __init__(self, caption_entry, youtube_client):
self.youtube_client = youtube_client
self.language = caption_entry.content.lang
self.track_source = caption_entry.content.src
self.machine_generated = YouTubeCaptionTrack._is_machine_generated(
caption_entry)
# Parse the video_id and caption_id out of a url like this:
# https://gdata.youtube.com/feeds/api/videos/Jom6EtXzRMg/captiondata/Ch4LEO3ZhwUaFQjIic2vrcLuxCYSAmVuGgAiA2Fzcgw
o = urlparse.urlparse(self.track_source)
path_parts = o.path.split('/')
self.video_id = path_parts[path_parts.index('videos') + 1]
self.track_id = path_parts[path_parts.index('captiondata') + 1]
self.track_content = None
@staticmethod
def _is_machine_generated(caption_entry):
"""Looks for the derived element, and returns True if it is equal to
speechRecognition.
"""
# TODO(mattfaus): Move this to TrackEntry within youtube/data.py?
derived = caption_entry.GetElements(
tag='derived', namespace='http://gdata.youtube.com/schemas/2007')
if not derived:
return False
else:
derived = derived[0]
return derived.text == 'speechRecognition'
def download_track(self):
response = self.youtube_client.get_caption_track(
track_url=self.track_source, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key)
self.track_content = response.read(2 ** 31)
return self.track_content
| mit |
40223136/w17test1 | static/Brython3.1.3-20150514-095342/Lib/logging/config.py | 739 | 35619 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, socket, struct, traceback, re
import io
try:
import _thread as thread
import threading
except ImportError: #pragma: no cover
thread = None
from socketserver import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
if sys.platform == "win32":
RESET_ERROR = 10054 #WSAECONNRESET
else:
RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
"""
Read the logging configuration from a ConfigParser-format file.
This can be called several times from an application, allowing an end user
the ability to select from various pre-canned configurations (if the
developer provides a mechanism to present the choices and load the chosen
configuration).
"""
import configparser
cp = configparser.ConfigParser(defaults)
if hasattr(fname, 'readline'):
cp.read_file(fname)
else:
cp.read(fname)
formatters = _create_formatters(cp)
# critical section
logging._acquireLock()
try:
logging._handlers.clear()
del logging._handlerList[:]
# Handlers add themselves to logging._handlers
handlers = _install_handlers(cp, formatters)
_install_loggers(cp, handlers, disable_existing_loggers)
finally:
logging._releaseLock()
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _create_formatters(cp):
"""Create and return formatters"""
flist = cp["formatters"]["keys"]
if not len(flist):
return {}
flist = flist.split(",")
flist = _strip_spaces(flist)
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
fs = cp.get(sectname, "format", raw=True, fallback=None)
dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
c = logging.Formatter
class_name = cp[sectname].get("class")
if class_name:
c = _resolve(class_name)
f = c(fs, dfs)
formatters[form] = f
return formatters
def _install_handlers(cp, formatters):
"""Install and return handlers"""
hlist = cp["handlers"]["keys"]
if not len(hlist):
return {}
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
handlers = {}
fixups = [] #for inter-handler references
for hand in hlist:
section = cp["handler_%s" % hand]
klass = section["class"]
fmt = section.get("formatter", "")
try:
klass = eval(klass, vars(logging))
except (AttributeError, NameError):
klass = _resolve(klass)
args = section["args"]
args = eval(args, vars(logging))
h = klass(*args)
if "level" in section:
level = section["level"]
h.setLevel(logging._levelNames[level])
if len(fmt):
h.setFormatter(formatters[fmt])
if issubclass(klass, logging.handlers.MemoryHandler):
target = section.get("target", "")
if len(target): #the target handler may not be loaded yet, so keep for later...
fixups.append((h, target))
handlers[hand] = h
#now all handlers are loaded, fixup inter-handler references...
for h, t in fixups:
h.setTarget(handlers[t])
return handlers
def _handle_existing_loggers(existing, child_loggers, disable_existing):
"""
When (re)configuring logging, handle loggers which were in the previous
configuration but are not in the new configuration. There's no point
deleting them as other threads may continue to hold references to them;
and by disabling them, you stop them doing any logging.
However, don't disable children of named loggers, as that's probably not
what was intended by the user. Also, allow existing loggers to NOT be
disabled if disable_existing is false.
"""
root = logging.root
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
else:
logger.disabled = disable_existing
def _install_loggers(cp, handlers, disable_existing):
"""Create and install loggers"""
# configure the root first
llist = cp["loggers"]["keys"]
llist = llist.split(",")
llist = list(map(lambda x: x.strip(), llist))
llist.remove("root")
section = cp["logger_root"]
root = logging.root
log = root
if "level" in section:
level = section["level"]
log.setLevel(logging._levelNames[level])
for h in root.handlers[:]:
root.removeHandler(h)
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
log.addHandler(handlers[hand])
#and now the others...
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
for log in llist:
section = cp["logger_%s" % log]
qn = section["qualname"]
propagate = section.getint("propagate", fallback=1)
logger = logging.getLogger(qn)
if qn in existing:
i = existing.index(qn) + 1 # start with the entry after qn
prefixed = qn + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(qn)
if "level" in section:
level = section["level"]
logger.setLevel(logging._levelNames[level])
for h in logger.handlers[:]:
logger.removeHandler(h)
logger.propagate = propagate
logger.disabled = 0
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
logger.addHandler(handlers[hand])
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = 1
# elif disable_existing_loggers:
# logger.disabled = 1
_handle_existing_loggers(existing, child_loggers, disable_existing)
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, str): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except Exception as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except Exception as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
deferred = []
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
if 'target not configured yet' in str(e):
deferred.append(name)
else:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Now do any that were deferred
for name in deferred:
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name) + 1 # look after name
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = True
# elif disable_existing:
# logger.disabled = True
_handle_existing_loggers(existing, child_loggers,
disable_existing)
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
style = config.get('style', '%')
result = logging.Formatter(fmt, dfmt, style)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except Exception as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
config_copy = dict(config) # for restoring in case of error
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except Exception as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
factory = c
else:
cname = config.pop('class')
klass = self.resolve(cname)
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
th = self.config['handlers'][config['target']]
if not isinstance(th, logging.Handler):
config.update(config_copy) # restore for deferred cfg
raise TypeError('target not configured yet')
config['target'] = th
except Exception as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError as te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(logging._checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except Exception as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(logging._checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
"""
Start up a socket server on the specified port, and listen for new
configurations.
These will be sent as a file suitable for processing by fileConfig().
Returns a Thread object on which you can call start() to start the server,
and which you can join() when appropriate. To stop the server, call
stopListening().
"""
if not thread: #pragma: no cover
raise NotImplementedError("listen() needs threading to work")
class ConfigStreamHandler(StreamRequestHandler):
"""
Handler for a logging configuration request.
It expects a completely new logging configuration and uses fileConfig
to install it.
"""
def handle(self):
"""
Handle a request.
Each request is expected to be a 4-byte length, packed using
struct.pack(">L", n), followed by the config file.
Uses fileConfig() to do the grunt work.
"""
try:
conn = self.connection
chunk = conn.recv(4)
if len(chunk) == 4:
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
chunk = chunk.decode("utf-8")
try:
import json
                        d = json.loads(chunk)
assert isinstance(d, dict)
dictConfig(d)
except:
                        # Not a JSON dict - fall back to the fileConfig (ini) format.
file = io.StringIO(chunk)
try:
fileConfig(file)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
traceback.print_exc()
if self.server.ready:
self.server.ready.set()
except socket.error as e:
if not isinstance(e.args, tuple):
raise
else:
errcode = e.args[0]
if errcode != RESET_ERROR:
raise
class ConfigSocketReceiver(ThreadingTCPServer):
"""
A simple TCP socket-based logging config receiver.
"""
allow_reuse_address = 1
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None):
ThreadingTCPServer.__init__(self, (host, port), handler)
logging._acquireLock()
self.abort = 0
logging._releaseLock()
self.timeout = 1
self.ready = ready
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
logging._acquireLock()
abort = self.abort
logging._releaseLock()
self.socket.close()
class Server(threading.Thread):
def __init__(self, rcvr, hdlr, port):
super(Server, self).__init__()
self.rcvr = rcvr
self.hdlr = hdlr
self.port = port
self.ready = threading.Event()
def run(self):
server = self.rcvr(port=self.port, handler=self.hdlr,
ready=self.ready)
if self.port == 0:
self.port = server.server_address[1]
self.ready.set()
global _listener
logging._acquireLock()
_listener = server
logging._releaseLock()
server.serve_until_stopped()
return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
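# Illustrative sketch (not part of the original module): one way a client
# process might push a new configuration to a program that called listen().
# The wire format -- a 4-byte big-endian length prefix followed by the config
# bytes -- mirrors what ConfigStreamHandler.handle expects; the helper name
# and the choice of a JSON payload here are examples only.
def _example_send_logging_config(config_dict, host='localhost',
                                 port=DEFAULT_LOGGING_CONFIG_PORT):
    import json, socket, struct
    payload = json.dumps(config_dict).encode('utf-8')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((host, port))
        sock.sendall(struct.pack('>L', len(payload)) + payload)
    finally:
        sock.close()
# In the listening process (also illustrative):
#     t = listen()
#     t.start()
#     ...            # run the application; reconfigure remotely as needed
#     stopListening()
#     t.join()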
| gpl-3.0 |
flamholz/thrift | lib/py/src/protocol/TProtocol.py | 75 | 10848 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from thrift.Thrift import *
class TProtocolException(TException):
"""Custom Protocol Exception class"""
UNKNOWN = 0
INVALID_DATA = 1
NEGATIVE_SIZE = 2
SIZE_LIMIT = 3
BAD_VERSION = 4
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
class TProtocolBase:
"""Base class for Thrift protocol driver."""
def __init__(self, trans):
self.trans = trans
def writeMessageBegin(self, name, ttype, seqid):
pass
def writeMessageEnd(self):
pass
def writeStructBegin(self, name):
pass
def writeStructEnd(self):
pass
def writeFieldBegin(self, name, ttype, fid):
pass
def writeFieldEnd(self):
pass
def writeFieldStop(self):
pass
def writeMapBegin(self, ktype, vtype, size):
pass
def writeMapEnd(self):
pass
def writeListBegin(self, etype, size):
pass
def writeListEnd(self):
pass
def writeSetBegin(self, etype, size):
pass
def writeSetEnd(self):
pass
def writeBool(self, bool_val):
pass
def writeByte(self, byte):
pass
def writeI16(self, i16):
pass
def writeI32(self, i32):
pass
def writeI64(self, i64):
pass
def writeDouble(self, dub):
pass
def writeString(self, str_val):
pass
def readMessageBegin(self):
pass
def readMessageEnd(self):
pass
def readStructBegin(self):
pass
def readStructEnd(self):
pass
def readFieldBegin(self):
pass
def readFieldEnd(self):
pass
def readMapBegin(self):
pass
def readMapEnd(self):
pass
def readListBegin(self):
pass
def readListEnd(self):
pass
def readSetBegin(self):
pass
def readSetEnd(self):
pass
def readBool(self):
pass
def readByte(self):
pass
def readI16(self):
pass
def readI32(self):
pass
def readI64(self):
pass
def readDouble(self):
pass
def readString(self):
pass
def skip(self, ttype):
if ttype == TType.STOP:
return
elif ttype == TType.BOOL:
self.readBool()
elif ttype == TType.BYTE:
self.readByte()
elif ttype == TType.I16:
self.readI16()
elif ttype == TType.I32:
self.readI32()
elif ttype == TType.I64:
self.readI64()
elif ttype == TType.DOUBLE:
self.readDouble()
elif ttype == TType.STRING:
self.readString()
elif ttype == TType.STRUCT:
name = self.readStructBegin()
while True:
(name, ttype, id) = self.readFieldBegin()
if ttype == TType.STOP:
break
self.skip(ttype)
self.readFieldEnd()
self.readStructEnd()
elif ttype == TType.MAP:
(ktype, vtype, size) = self.readMapBegin()
for i in xrange(size):
self.skip(ktype)
self.skip(vtype)
self.readMapEnd()
elif ttype == TType.SET:
(etype, size) = self.readSetBegin()
for i in xrange(size):
self.skip(etype)
self.readSetEnd()
elif ttype == TType.LIST:
(etype, size) = self.readListBegin()
for i in xrange(size):
self.skip(etype)
self.readListEnd()
  # tuple of: ('reader method' name, 'writer method' name, is_container bool)
_TTYPE_HANDLERS = (
(None, None, False), # 0 TType.STOP
(None, None, False), # 1 TType.VOID # TODO: handle void?
('readBool', 'writeBool', False), # 2 TType.BOOL
('readByte', 'writeByte', False), # 3 TType.BYTE and I08
('readDouble', 'writeDouble', False), # 4 TType.DOUBLE
(None, None, False), # 5 undefined
('readI16', 'writeI16', False), # 6 TType.I16
(None, None, False), # 7 undefined
('readI32', 'writeI32', False), # 8 TType.I32
(None, None, False), # 9 undefined
('readI64', 'writeI64', False), # 10 TType.I64
('readString', 'writeString', False), # 11 TType.STRING and UTF7
('readContainerStruct', 'writeContainerStruct', True), # 12 *.STRUCT
('readContainerMap', 'writeContainerMap', True), # 13 TType.MAP
('readContainerSet', 'writeContainerSet', True), # 14 TType.SET
('readContainerList', 'writeContainerList', True), # 15 TType.LIST
(None, None, False), # 16 TType.UTF8 # TODO: handle utf8 types?
(None, None, False) # 17 TType.UTF16 # TODO: handle utf16 types?
)
def readFieldByTType(self, ttype, spec):
try:
(r_handler, w_handler, is_container) = self._TTYPE_HANDLERS[ttype]
except IndexError:
raise TProtocolException(type=TProtocolException.INVALID_DATA,
message='Invalid field type %d' % (ttype))
if r_handler is None:
raise TProtocolException(type=TProtocolException.INVALID_DATA,
message='Invalid field type %d' % (ttype))
reader = getattr(self, r_handler)
if not is_container:
return reader()
return reader(spec)
def readContainerList(self, spec):
results = []
ttype, tspec = spec[0], spec[1]
r_handler = self._TTYPE_HANDLERS[ttype][0]
reader = getattr(self, r_handler)
(list_type, list_len) = self.readListBegin()
if tspec is None:
# list values are simple types
for idx in xrange(list_len):
results.append(reader())
else:
# this is like an inlined readFieldByTType
container_reader = self._TTYPE_HANDLERS[list_type][0]
val_reader = getattr(self, container_reader)
for idx in xrange(list_len):
val = val_reader(tspec)
results.append(val)
self.readListEnd()
return results
def readContainerSet(self, spec):
results = set()
ttype, tspec = spec[0], spec[1]
r_handler = self._TTYPE_HANDLERS[ttype][0]
reader = getattr(self, r_handler)
(set_type, set_len) = self.readSetBegin()
if tspec is None:
# set members are simple types
for idx in xrange(set_len):
results.add(reader())
else:
container_reader = self._TTYPE_HANDLERS[set_type][0]
val_reader = getattr(self, container_reader)
for idx in xrange(set_len):
results.add(val_reader(tspec))
self.readSetEnd()
return results
def readContainerStruct(self, spec):
(obj_class, obj_spec) = spec
obj = obj_class()
obj.read(self)
return obj
def readContainerMap(self, spec):
results = dict()
key_ttype, key_spec = spec[0], spec[1]
val_ttype, val_spec = spec[2], spec[3]
(map_ktype, map_vtype, map_len) = self.readMapBegin()
# TODO: compare types we just decoded with thrift_spec and
# abort/skip if types disagree
key_reader = getattr(self, self._TTYPE_HANDLERS[key_ttype][0])
val_reader = getattr(self, self._TTYPE_HANDLERS[val_ttype][0])
    # decode each key/value pair; either side may be a simple type or a container
for idx in xrange(map_len):
if key_spec is None:
k_val = key_reader()
else:
k_val = self.readFieldByTType(key_ttype, key_spec)
if val_spec is None:
v_val = val_reader()
else:
v_val = self.readFieldByTType(val_ttype, val_spec)
      # this raises a TypeError for unhashable key types
# i.e. this fails: d=dict(); d[[0,1]] = 2
results[k_val] = v_val
self.readMapEnd()
return results
def readStruct(self, obj, thrift_spec):
self.readStructBegin()
while True:
(fname, ftype, fid) = self.readFieldBegin()
if ftype == TType.STOP:
break
try:
field = thrift_spec[fid]
except IndexError:
self.skip(ftype)
else:
if field is not None and ftype == field[1]:
fname = field[2]
fspec = field[3]
val = self.readFieldByTType(ftype, fspec)
setattr(obj, fname, val)
else:
self.skip(ftype)
self.readFieldEnd()
self.readStructEnd()
def writeContainerStruct(self, val, spec):
val.write(self)
def writeContainerList(self, val, spec):
self.writeListBegin(spec[0], len(val))
r_handler, w_handler, is_container = self._TTYPE_HANDLERS[spec[0]]
e_writer = getattr(self, w_handler)
if not is_container:
for elem in val:
e_writer(elem)
else:
for elem in val:
e_writer(elem, spec[1])
self.writeListEnd()
def writeContainerSet(self, val, spec):
self.writeSetBegin(spec[0], len(val))
r_handler, w_handler, is_container = self._TTYPE_HANDLERS[spec[0]]
e_writer = getattr(self, w_handler)
if not is_container:
for elem in val:
e_writer(elem)
else:
for elem in val:
e_writer(elem, spec[1])
self.writeSetEnd()
def writeContainerMap(self, val, spec):
k_type = spec[0]
v_type = spec[2]
ignore, ktype_name, k_is_container = self._TTYPE_HANDLERS[k_type]
ignore, vtype_name, v_is_container = self._TTYPE_HANDLERS[v_type]
k_writer = getattr(self, ktype_name)
v_writer = getattr(self, vtype_name)
self.writeMapBegin(k_type, v_type, len(val))
for m_key, m_val in val.iteritems():
if not k_is_container:
k_writer(m_key)
else:
k_writer(m_key, spec[1])
if not v_is_container:
v_writer(m_val)
else:
v_writer(m_val, spec[3])
self.writeMapEnd()
def writeStruct(self, obj, thrift_spec):
self.writeStructBegin(obj.__class__.__name__)
for field in thrift_spec:
if field is None:
continue
fname = field[2]
val = getattr(obj, fname)
if val is None:
# skip writing out unset fields
continue
fid = field[0]
ftype = field[1]
fspec = field[3]
# get the writer method for this value
self.writeFieldBegin(fname, ftype, fid)
self.writeFieldByTType(ftype, val, fspec)
self.writeFieldEnd()
self.writeFieldStop()
self.writeStructEnd()
def writeFieldByTType(self, ttype, val, spec):
r_handler, w_handler, is_container = self._TTYPE_HANDLERS[ttype]
writer = getattr(self, w_handler)
if is_container:
writer(val, spec)
else:
writer(val)
class TProtocolFactory:
def getProtocol(self, trans):
pass
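# Illustrative sketch (not part of the original file): the thrift_spec layout
# that readStruct()/writeStruct() above expect. Each non-None entry is
# (field_id, field_type, field_name, field_type_spec, default_value); index 0
# is unused so entries line up with 1-based Thrift field ids. The "Person"
# struct below is a made-up example; real specs are emitted by the Thrift
# compiler.
_EXAMPLE_THRIFT_SPEC = (
    None,                                                    # field id 0 unused
    (1, TType.STRING, 'name', None, None),                   # string name
    (2, TType.I32, 'age', None, None),                       # i32 age
    (3, TType.LIST, 'phones', (TType.STRING, None), None),   # list<string> phones
)
# A concrete protocol (e.g. TBinaryProtocol) would then be used as:
#     protocol.readStruct(obj, _EXAMPLE_THRIFT_SPEC)
#     protocol.writeStruct(obj, _EXAMPLE_THRIFT_SPEC)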
| apache-2.0 |
qtumproject/qtum | test/functional/feature_filelock.py | 8 | 1833 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check that it's not possible to start a second bitcoind instance using the same datadir or wallet."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
class FilelockTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=None)
self.nodes[0].start([])
self.nodes[0].wait_for_rpc_connection()
def run_test(self):
datadir = os.path.join(self.nodes[0].datadir, self.chain)
self.log.info("Using datadir {}".format(datadir))
self.log.info("Check that we can't start a second bitcoind instance using the same datadir")
expected_msg = "Error: Cannot obtain a lock on data directory {0}. {1} is probably already running.".format(datadir, self.config['environment']['PACKAGE_NAME'])
self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg)
if self.is_wallet_compiled():
wallet_dir = os.path.join(datadir, 'wallets')
self.log.info("Check that we can't start a second bitcoind instance using the same wallet")
expected_msg = "Error: Error initializing wallet database environment"
self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)
if __name__ == '__main__':
FilelockTest().main()
| mit |
nickpack/reportlab | src/reportlab/pdfbase/pdfform.py | 3 | 17084 |
"""Support for Acrobat Forms in ReportLab documents
This module is somewhat experimental at this time.
Includes basic support for
textfields,
select fields (drop down lists), and
check buttons.
The public interface consists of functions at the moment.
At some later date these operations may be made into canvas
methods. (comments?)
The ...Absolute(...) functions position the fields with respect
to the absolute canvas coordinate space -- that is, they do not
respect any coordinate transforms in effect for the canvas.
The ...Relative(...) functions position ONLY THE LOWER LEFT
CORNER of the field using the coordinate transform in effect for
the canvas. THIS WILL ONLY WORK CORRECTLY FOR TRANSLATED COORDINATES
-- THE SHAPE, SIZE, FONTSIZE, AND ORIENTATION OF THE FIELD WILL NOT BE AFFECTED
BY SCALING, ROTATION, SKEWING OR OTHER NON-TRANSLATION COORDINATE
TRANSFORMS.
Please note that all field names (titles) in a given document must be unique.
Textfields and select fields only support the "base 14" canvas fonts
at this time.
See individual function docstrings below for more information.
The function test1(...) generates a simple test file.
THIS CONTRIBUTION WAS COMMISSIONED BY REPORTLAB USERS
WHO WISH TO REMAIN ANONYMOUS.
"""
### NOTE: MAKE THE STRING FORMATS DYNAMIC IN PATTERNS TO SUPPORT ENCRYPTION XXXX
import string
from reportlab.pdfbase.pdfdoc import LINEEND, PDFString, PDFStream, PDFDictionary, PDFName
from reportlab.lib.colors import obj_R_G_B
#==========================public interfaces
def textFieldAbsolute(canvas, title, x, y, width, height, value="", maxlen=1000000, multiline=0):
"""Place a text field on the current page
with name title at ABSOLUTE position (x,y) with
dimensions (width, height), using value as the default value and
maxlen as the maximum permissible length. If multiline is set make
it a multiline field.
"""
theform = getForm(canvas)
return theform.textField(canvas, title, x, y, x+width, y+height, value, maxlen, multiline)
def textFieldRelative(canvas, title, xR, yR, width, height, value="", maxlen=1000000, multiline=0):
"same as textFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return textFieldAbsolute(canvas, title, xA, yA, width, height, value, maxlen, multiline)
def buttonFieldAbsolute(canvas, title, value, x, y):
"""Place a check button field on the current page
with name title and default value value (one of "Yes" or "Off")
at ABSOLUTE position (x,y).
"""
theform = getForm(canvas)
return theform.buttonField(canvas, title, value, x, y)
def buttonFieldRelative(canvas, title, value, xR, yR):
"same as buttonFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return buttonFieldAbsolute(canvas, title, value, xA, yA)
def selectFieldAbsolute(canvas, title, value, options, x, y, width, height):
"""Place a select field (drop down list) on the current page
with name title and
with options listed in the sequence options
default value value (must be one of options)
at ABSOLUTE position (x,y) with dimensions (width, height)."""
theform = getForm(canvas)
theform.selectField(canvas, title, value, options, x, y, x+width, y+height)
def selectFieldRelative(canvas, title, value, options, xR, yR, width, height):
"same as textFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return selectFieldAbsolute(canvas, title, value, options, xA, yA, width, height)
def test1():
from reportlab.pdfgen import canvas
fn = "formtest1.pdf"
c = canvas.Canvas(fn)
# first page
c.setFont("Courier", 10)
c.drawString(100, 500, "hello world")
textFieldAbsolute(c, "fieldA", 100, 600, 100, 20, "default value")
textFieldAbsolute(c, "fieldB", 100, 300, 100, 50, "another default value", multiline=1)
selectFieldAbsolute(c, "fieldC", "France", ["Canada", "France", "China"], 100, 200, 100, 20)
c.rect(100, 600, 100, 20)
buttonFieldAbsolute(c, "field2", "Yes", 100, 700)
c.rect(100, 700, 20, 20)
buttonFieldAbsolute(c, "field3", "Off", 100, 800)
c.rect(100, 800, 20, 20)
# second page
c.showPage()
c.setFont("Helvetica", 7)
c.translate(50, 20)
c.drawString(100, 500, "hello world")
textFieldRelative(c, "fieldA_1", 100, 600, 100, 20, "default value 2")
c.setStrokeColorRGB(1,0,0)
c.setFillColorRGB(0,1,0.5)
textFieldRelative(c, "fieldB_1", 100, 300, 100, 50, "another default value 2", multiline=1)
selectFieldRelative(c, "fieldC_1", "France 1", ["Canada 0", "France 1", "China 2"], 100, 200, 100, 20)
c.rect(100, 600, 100, 20)
buttonFieldRelative(c, "field2_1", "Yes", 100, 700)
c.rect(100, 700, 20, 20)
buttonFieldRelative(c, "field3_1", "Off", 100, 800)
c.rect(100, 800, 20, 20)
c.save()
print "wrote", fn
#==========================end of public interfaces
from pdfpattern import PDFPattern
def getForm(canvas):
"get form from canvas, create the form if needed"
try:
return canvas.AcroForm
except AttributeError:
theform = canvas.AcroForm = AcroForm()
# install the form in the document
d = canvas._doc
cat = d._catalog
cat.AcroForm = theform
return theform
class AcroForm:
__PDFObject__ = True
def __init__(self):
self.fields = []
def textField(self, canvas, title, xmin, ymin, xmax, ymax, value="", maxlen=1000000, multiline=0):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
# determine text info
R, G, B = obj_R_G_B(canvas._fillColorObj)
#print "rgb", (R,G,B)
        font = canvas._fontname
        fontsize = canvas._fontsize
field = TextField(title, value, xmin, ymin, xmax, ymax, page, maxlen,
font, fontsize, R, G, B, multiline)
self.fields.append(field)
canvas._addAnnotation(field)
def selectField(self, canvas, title, value, options, xmin, ymin, xmax, ymax):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
# determine text info
R, G, B = obj_R_G_B(canvas._fillColorObj)
#print "rgb", (R,G,B)
        font = canvas._fontname
        fontsize = canvas._fontsize
field = SelectField(title, value, options, xmin, ymin, xmax, ymax, page,
font=font, fontsize=fontsize, R=R, G=G, B=B)
self.fields.append(field)
canvas._addAnnotation(field)
def buttonField(self, canvas, title, value, xmin, ymin):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
field = ButtonField(title, value, xmin, ymin, page)
self.fields.append(field)
canvas._addAnnotation(field)
def format(self, document):
from reportlab.pdfbase.pdfdoc import PDFArray
proxy = PDFPattern(FormPattern, Resources=GLOBALRESOURCES, fields=PDFArray(self.fields))
return proxy.format(document)
FormPattern = [
'<<', LINEEND,
' /NeedAppearances true ', LINEEND,
' /DA ', PDFString('/Helv 0 Tf 0 g '), LINEEND,
' /DR ', LINEEND,
["Resources"],
' /Fields ', LINEEND,
["fields"],
'>>'
]
def FormFontsDictionary():
from reportlab.pdfbase.pdfdoc import PDFDictionary
fontsdictionary = PDFDictionary()
fontsdictionary.__RefOnly__ = 1
for (fullname, shortname) in FORMFONTNAMES.items():
fontsdictionary[shortname] = FormFont(fullname, shortname)
fontsdictionary["ZaDb"] = ZADB
return fontsdictionary
def FormResources():
return PDFPattern(FormResourcesDictionaryPattern,
Encoding=ENCODING, Font=GLOBALFONTSDICTIONARY)
ZaDbPattern = [
' <<'
' /BaseFont'
' /ZapfDingbats'
' /Name'
' /ZaDb'
' /Subtype'
' /Type1'
' /Type'
' /Font'
'>>']
ZADB = PDFPattern(ZaDbPattern)
FormResourcesDictionaryPattern = [
'<<',
' /Encoding ',
["Encoding"], LINEEND,
' /Font ',
["Font"], LINEEND,
'>>'
]
FORMFONTNAMES = {
"Helvetica": "Helv",
"Helvetica-Bold": "HeBo",
'Courier': "Cour",
'Courier-Bold': "CoBo",
'Courier-Oblique': "CoOb",
'Courier-BoldOblique': "CoBO",
'Helvetica-Oblique': "HeOb",
'Helvetica-BoldOblique': "HeBO",
'Times-Roman': "Time",
'Times-Bold': "TiBo",
'Times-Italic': "TiIt",
'Times-BoldItalic': "TiBI",
}
EncodingPattern = [
'<<',
' /PDFDocEncoding ',
["PDFDocEncoding"], LINEEND,
'>>',
]
PDFDocEncodingPattern = [
'<<'
' /Differences'
' ['
' 24'
' /breve'
' /caron'
' /circumflex'
' /dotaccent'
' /hungarumlaut'
' /ogonek'
' /ring'
' /tilde'
' 39'
' /quotesingle'
' 96'
' /grave'
' 128'
' /bullet'
' /dagger'
' /daggerdbl'
' /ellipsis'
' /emdash'
' /endash'
' /florin'
' /fraction'
' /guilsinglleft'
' /guilsinglright'
' /minus'
' /perthousand'
' /quotedblbase'
' /quotedblleft'
' /quotedblright'
' /quoteleft'
' /quoteright'
' /quotesinglbase'
' /trademark'
' /fi'
' /fl'
' /Lslash'
' /OE'
' /Scaron'
' /Ydieresis'
' /Zcaron'
' /dotlessi'
' /lslash'
' /oe'
' /scaron'
' /zcaron'
' 160'
' /Euro'
' 164'
' /currency'
' 166'
' /brokenbar'
' 168'
' /dieresis'
' /copyright'
' /ordfeminine'
' 172'
' /logicalnot'
' /.notdef'
' /registered'
' /macron'
' /degree'
' /plusminus'
' /twosuperior'
' /threesuperior'
' /acute'
' /mu'
' 183'
' /periodcentered'
' /cedilla'
' /onesuperior'
' /ordmasculine'
' 188'
' /onequarter'
' /onehalf'
' /threequarters'
' 192'
' /Agrave'
' /Aacute'
' /Acircumflex'
' /Atilde'
' /Adieresis'
' /Aring'
' /AE'
' /Ccedilla'
' /Egrave'
' /Eacute'
' /Ecircumflex'
' /Edieresis'
' /Igrave'
' /Iacute'
' /Icircumflex'
' /Idieresis'
' /Eth'
' /Ntilde'
' /Ograve'
' /Oacute'
' /Ocircumflex'
' /Otilde'
' /Odieresis'
' /multiply'
' /Oslash'
' /Ugrave'
' /Uacute'
' /Ucircumflex'
' /Udieresis'
' /Yacute'
' /Thorn'
' /germandbls'
' /agrave'
' /aacute'
' /acircumflex'
' /atilde'
' /adieresis'
' /aring'
' /ae'
' /ccedilla'
' /egrave'
' /eacute'
' /ecircumflex'
' /edieresis'
' /igrave'
' /iacute'
' /icircumflex'
' /idieresis'
' /eth'
' /ntilde'
' /ograve'
' /oacute'
' /ocircumflex'
' /otilde'
' /odieresis'
' /divide'
' /oslash'
' /ugrave'
' /uacute'
' /ucircumflex'
' /udieresis'
' /yacute'
' /thorn'
' /ydieresis'
' ]'
' /Type'
' /Encoding'
'>>']
# global constant
PDFDOCENC = PDFPattern(PDFDocEncodingPattern)
# global constant
ENCODING = PDFPattern(EncodingPattern, PDFDocEncoding=PDFDOCENC)
def FormFont(BaseFont, Name):
from reportlab.pdfbase.pdfdoc import PDFName
return PDFPattern(FormFontPattern, BaseFont=PDFName(BaseFont), Name=PDFName(Name), Encoding=PDFDOCENC)
FormFontPattern = [
'<<',
' /BaseFont ',
["BaseFont"], LINEEND,
' /Encoding ',
["Encoding"], LINEEND,
' /Name ',
["Name"], LINEEND,
' /Subtype '
' /Type1 '
' /Type '
' /Font '
'>>' ]
# global constants
GLOBALFONTSDICTIONARY = FormFontsDictionary()
GLOBALRESOURCES = FormResources()
def TextField(title, value, xmin, ymin, xmax, ymax, page,
maxlen=1000000, font="Helvetica-Bold", fontsize=9, R=0, G=0, B=0.627, multiline=0):
from reportlab.pdfbase.pdfdoc import PDFString, PDFName
Flags = 0
if multiline:
Flags = Flags | (1<<12) # bit 13 is at position 12 :)
fontname = FORMFONTNAMES[font]
return PDFPattern(TextFieldPattern,
value=PDFString(value), maxlen=maxlen, page=page,
title=PDFString(title),
xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,
fontname=PDFName(fontname), fontsize=fontsize, R=R, G=G, B=B, Flags=Flags)
TextFieldPattern = [
'<<'
' /DA'
' (', ["fontname"],' ',["fontsize"],' Tf ',["R"],' ',["G"],' ',["B"],' rg)'
' /DV ',
["value"], LINEEND,
' /F 4 /FT /Tx'
'/MK << /BC [ 0 0 0 ] >>'
' /MaxLen ',
["maxlen"], LINEEND,
' /P ',
["page"], LINEEND,
' /Rect '
' [', ["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"], ' ]'
'/Subtype /Widget'
' /T ',
["title"], LINEEND,
' /Type'
' /Annot'
' /V ',
["value"], LINEEND,
' /Ff ',
["Flags"],LINEEND,
'>>']
def SelectField(title, value, options, xmin, ymin, xmax, ymax, page,
font="Helvetica-Bold", fontsize=9, R=0, G=0, B=0.627):
#print "ARGS", (title, value, options, xmin, ymin, xmax, ymax, page, font, fontsize, R, G, B)
from reportlab.pdfbase.pdfdoc import PDFString, PDFName, PDFArray
if value not in options:
raise ValueError, "value %s must be one of options %s" % (repr(value), repr(options))
fontname = FORMFONTNAMES[font]
optionstrings = map(PDFString, options)
optionarray = PDFArray(optionstrings)
return PDFPattern(SelectFieldPattern,
Options=optionarray,
Selected=PDFString(value), Page=page,
Name=PDFString(title),
xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,
fontname=PDFName(fontname), fontsize=fontsize, R=R, G=G, B=B)
SelectFieldPattern = [
'<< % a select list',LINEEND,
' /DA ',
' (', ["fontname"],' ',["fontsize"],' Tf ',["R"],' ',["G"],' ',["B"],' rg)',LINEEND,
#' (/Helv 12 Tf 0 g)',LINEEND,
' /DV ',
["Selected"],LINEEND,
' /F ',
' 4',LINEEND,
' /FT ',
' /Ch',LINEEND,
' /MK ',
' <<',
' /BC',
' [',
' 0',
' 0',
' 0',
' ]',
' /BG',
' [',
' 1',
' 1',
' 1',
' ]',
' >>',LINEEND,
' /Opt ',
["Options"],LINEEND,
' /P ',
["Page"],LINEEND,
'/Rect',
' [',["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"],
' ] ',LINEEND,
'/Subtype',
' /Widget',LINEEND,
' /T ',
["Name"],LINEEND,
' /Type ',
' /Annot',
' /V ',
["Selected"],LINEEND,
'>>']
def ButtonField(title, value, xmin, ymin, page):
if value not in ("Yes", "Off"):
raise ValueError, "button value must be 'Yes' or 'Off': "+repr(value)
(dx, dy) = (16.77036, 14.90698)
return PDFPattern(ButtonFieldPattern,
Name=PDFString(title),
xmin=xmin, ymin=ymin, xmax=xmin+dx, ymax=ymin+dy,
Hide=HIDE,
APDOff=APDOFF,
APDYes=APDYES,
APNYes=APNYES,
Value=PDFName(value),
Page=page)
ButtonFieldPattern = ['<< ',
'/AA',
' <<',
' /D ',
["Hide"], LINEEND,
#' %(imported.18.0)s',
' >> ',
'/AP ',
' <<',
' /D',
' <<',
' /Off ',
#' %(imported.40.0)s',
["APDOff"], LINEEND,
' /Yes ',
#' %(imported.39.0)s',
["APDYes"], LINEEND,
' >>', LINEEND,
' /N',
' << ',
' /Yes ',
#' %(imported.38.0)s',
["APNYes"], LINEEND,
' >>',
' >>', LINEEND,
' /AS ',
["Value"], LINEEND,
' /DA ',
PDFString('/ZaDb 0 Tf 0 g'), LINEEND,
'/DV ',
["Value"], LINEEND,
'/F ',
' 4 ',
'/FT ',
' /Btn ',
'/H ',
' /T ',
'/MK ',
' <<',
' /AC (\\376\\377)',
#PDFString('\376\377'),
' /CA ',
PDFString('4'),
' /RC ',
PDFString('\376\377'),
' >> ',LINEEND,
'/P ',
["Page"], LINEEND,
'/Rect',
' [',["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"],
' ] ',LINEEND,
'/Subtype',
' /Widget ',
'/T ',
["Name"], LINEEND,
'/Type',
' /Annot ',
'/V ',
["Value"], LINEEND,
' >>']
HIDE = PDFPattern([
'<< '
'/S '
' /Hide '
'>>'])
def buttonStreamDictionary():
"everything except the length for the button appearance streams"
result = PDFDictionary()
result["SubType"] = "/Form"
result["BBox"] = "[0 0 16.77036 14.90698]"
font = PDFDictionary()
font["ZaDb"] = ZADB
resources = PDFDictionary()
resources["ProcSet"] = "[ /PDF /Text ]"
resources["Font"] = font
result["Resources"] = resources
return result
def ButtonStream(content):
dict = buttonStreamDictionary()
result = PDFStream(dict, content)
result.filters = []
return result
APDOFF = ButtonStream('0.749 g 0 0 16.7704 14.907 re f'+LINEEND)
APDYES = ButtonStream(
'0.749 g 0 0 16.7704 14.907 re f q 1 1 14.7704 12.907 re W '+
'n BT /ZaDb 11.3086 Tf 0 g 1 0 0 1 3.6017 3.3881 Tm (4) Tj ET'+LINEEND)
APNYES = ButtonStream(
'q 1 1 14.7704 12.907 re W n BT /ZaDb 11.3086 Tf 0 g 1 0 0 1 3.6017 3.3881 Tm (4) Tj ET Q'+LINEEND)
#==== script interpretation
if __name__=="__main__":
test1()
| bsd-3-clause |
a-parhom/edx-platform | common/djangoapps/student/tests/test_password_policy.py | 2 | 14639 | # -*- coding: utf-8 -*-
"""
This test file will verify proper password policy enforcement, which is an option feature
"""
import json
from importlib import import_module
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.urls import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from mock import patch
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangoapps.user_authn.views.deprecated import create_account
from util.password_policy_validators import create_validator_config
class TestPasswordPolicy(TestCase):
"""
Go through some password policy tests to make sure things are properly working
"""
def setUp(self):
super(TestPasswordPolicy, self).setUp()
self.url = reverse('create_account')
self.request_factory = RequestFactory()
self.url_params = {
'username': 'username',
'email': '[email protected]',
'name': 'username',
'terms_of_service': 'true',
'honor_code': 'true',
}
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 6})
])
def test_password_length_too_short(self):
self.url_params['password'] = 'aaa'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password is too short. It must contain at least 6 characters.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 6})
])
def test_password_length_long_enough(self):
self.url_params['password'] = 'ThisIsALongerPassword'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MaximumLengthValidator', {'max_length': 12})
])
def test_password_length_too_long(self):
self.url_params['password'] = 'ThisPasswordIsWayTooLong'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password is too long. It must contain no more than 12 characters.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.UppercaseValidator', {'min_upper': 3})
])
def test_password_not_enough_uppercase(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password must contain at least 3 uppercase letters.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.UppercaseValidator', {'min_upper': 3})
])
def test_password_enough_uppercase(self):
self.url_params['password'] = 'ThisShouldPass'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.LowercaseValidator', {'min_lower': 3})
])
def test_password_not_enough_lowercase(self):
self.url_params['password'] = 'THISSHOULDFAIL'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password must contain at least 3 lowercase letters.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.LowercaseValidator', {'min_lower': 3})
])
def test_password_enough_lowercase(self):
self.url_params['password'] = 'ThisShouldPass'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.PunctuationValidator', {'min_punctuation': 3})
])
def test_not_enough_punctuations(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password must contain at least 3 punctuation marks.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.PunctuationValidator', {'min_punctuation': 3})
])
def test_enough_punctuations(self):
self.url_params['password'] = 'Th!sSh.uldPa$*'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.NumericValidator', {'min_numeric': 3})
])
def test_not_enough_numeric_characters(self):
# The unicode ២ is the number 2 in Khmer and the ٧ is the Arabic-Indic number 7
self.url_params['password'] = u'thisShouldFail២٧'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password must contain at least 3 numbers.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.NumericValidator', {'min_numeric': 3})
])
def test_enough_numeric_characters(self):
# The unicode ២ is the number 2 in Khmer
self.url_params['password'] = u'thisShouldPass២33'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.AlphabeticValidator', {'min_alphabetic': 3})
])
def test_not_enough_alphabetic_characters(self):
self.url_params['password'] = '123456ab'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password must contain at least 3 letters.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.AlphabeticValidator', {'min_alphabetic': 3})
])
def test_enough_alphabetic_characters(self):
self.url_params['password'] = u'𝒯𝓗Ï𝓼𝒫å𝓼𝓼𝔼𝓼'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 3}),
create_validator_config('util.password_policy_validators.UppercaseValidator', {'min_upper': 3}),
create_validator_config('util.password_policy_validators.NumericValidator', {'min_numeric': 3}),
create_validator_config('util.password_policy_validators.PunctuationValidator', {'min_punctuation': 3}),
])
def test_multiple_errors_fail(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
errstring = (
"This password must contain at least 3 uppercase letters. "
"This password must contain at least 3 numbers. "
"This password must contain at least 3 punctuation marks."
)
self.assertEqual(obj['value'], errstring)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 3}),
create_validator_config('util.password_policy_validators.UppercaseValidator', {'min_upper': 3}),
create_validator_config('util.password_policy_validators.LowercaseValidator', {'min_lower': 3}),
create_validator_config('util.password_policy_validators.NumericValidator', {'min_numeric': 3}),
create_validator_config('util.password_policy_validators.PunctuationValidator', {'min_punctuation': 3}),
])
def test_multiple_errors_pass(self):
self.url_params['password'] = u'tH1s Sh0u!d P3#$!'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('django.contrib.auth.password_validation.CommonPasswordValidator')
])
def test_common_password_fail(self):
self.url_params['password'] = 'password'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"This password is too common.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('django.contrib.auth.password_validation.CommonPasswordValidator')
])
def test_common_password_pass(self):
self.url_params['password'] = 'this_is_ok'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 6}),
create_validator_config('util.password_policy_validators.MaximumLengthValidator', {'max_length': 75}),
])
def test_with_unicode(self):
self.url_params['password'] = u'四節比分和七年前'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('util.password_policy_validators.MinimumLengthValidator', {'min_length': 6})
], SESSION_ENGINE='django.contrib.sessions.backends.cache')
def test_ext_auth_password_length_too_short(self):
"""
Tests that even if password policy is enforced, ext_auth registrations aren't subject to it
"""
self.url_params['password'] = u'aaa' # shouldn't pass validation
request = self.request_factory.post(self.url, self.url_params)
request.site = SiteFactory.create()
# now indicate we are doing ext_auth by setting 'ExternalAuthMap' in the session.
request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session
extauth = ExternalAuthMap(external_id='[email protected]',
external_email='[email protected]',
internal_password=self.url_params['password'],
external_domain='shib:https://idp.stanford.edu/')
request.session['ExternalAuthMap'] = extauth
request.user = AnonymousUser()
with patch('edxmako.request_context.get_current_request', return_value=request):
response = create_account(request)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
class TestUsernamePasswordNonmatch(TestCase):
"""
Test that registration username and password fields differ
"""
def setUp(self):
super(TestUsernamePasswordNonmatch, self).setUp()
self.url = reverse('create_account')
self.url_params = {
'username': 'username',
'email': '[email protected]',
'name': 'username',
'terms_of_service': 'true',
'honor_code': 'true',
}
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('django.contrib.auth.password_validation.UserAttributeSimilarityValidator')
])
def test_with_username_password_match(self):
self.url_params['username'] = "foobar"
self.url_params['password'] = "foobar"
response = self.client.post(self.url, self.url_params)
self.assertEquals(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"The password is too similar to the username.",
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('django.contrib.auth.password_validation.UserAttributeSimilarityValidator')
])
def test_with_username_password_nonmatch(self):
self.url_params['username'] = "foobar"
self.url_params['password'] = "nonmatch"
response = self.client.post(self.url, self.url_params)
self.assertEquals(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
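# Illustrative sketch (not part of the original tests): how validators like the
# ones exercised above might be combined in a deployment's Django settings.
# The thresholds below are example values, not edX defaults.
EXAMPLE_AUTH_PASSWORD_VALIDATORS = [
    create_validator_config(
        'util.password_policy_validators.MinimumLengthValidator', {'min_length': 8}),
    create_validator_config(
        'util.password_policy_validators.UppercaseValidator', {'min_upper': 1}),
    create_validator_config(
        'util.password_policy_validators.NumericValidator', {'min_numeric': 1}),
    create_validator_config(
        'django.contrib.auth.password_validation.CommonPasswordValidator'),
]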
| agpl-3.0 |
alaski/nova | nova/scheduler/filters/exact_disk_filter.py | 18 | 1846 | # Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class ExactDiskFilter(filters.BaseHostFilter):
"""Exact Disk Filter."""
def host_passes(self, host_state, spec_obj):
"""Return True if host has the exact amount of disk available."""
requested_disk = (1024 * (spec_obj.root_gb +
spec_obj.ephemeral_gb) +
spec_obj.swap)
if requested_disk != host_state.free_disk_mb:
LOG.debug("%(host_state)s does not have exactly "
"%(requested_disk)s MB usable disk, it "
"has %(usable_disk_mb)s.",
{'host_state': host_state,
'requested_disk': requested_disk,
'usable_disk_mb': host_state.free_disk_mb})
return False
# NOTE(mgoddard): Setting the limit ensures that it is enforced in
# compute. This ensures that if multiple instances are scheduled to a
# single host, then all after the first will fail in the claim.
host_state.limits['disk_gb'] = host_state.total_usable_disk_gb
return True
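# Illustrative note (not part of the original file): the filter only takes
# effect when listed in the scheduler's enabled filters in nova.conf. The
# option name varies by release (e.g. 'scheduler_default_filters' in older
# releases, '[filter_scheduler] enabled_filters' later), so the snippet below
# is only an example:
#
#     [DEFAULT]
#     scheduler_default_filters = RetryFilter,ExactRamFilter,ExactCoreFilter,ExactDiskFilter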
| apache-2.0 |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.512/resnet-tpuv2-512/code/resnet/model/tpu/models/official/resnet/resnet_main.py | 5 | 27064 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on ImageNet on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
from official.resnet import imagenet_input
from official.resnet import lars_util
from official.resnet import resnet_model
from tensorflow.contrib import summary
from tensorflow.contrib.tpu.python.tpu import async_checkpoint
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.estimator import estimator
FLAGS = flags.FLAGS
FAKE_DATA_DIR = 'gs://cloud-tpu-test-datasets/fake_imagenet'
flags.DEFINE_bool(
'use_tpu', default=True,
help=('Use TPU to execute the model for training and evaluation. If'
' --use_tpu=false, will use whatever devices are available to'
' TensorFlow by default (e.g. CPU and GPU)'))
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
    help='GCE zone where the Cloud TPU is located. If not specified, we '
    'will attempt to automatically detect the zone from the GCE metadata.')
# Model specific flags
flags.DEFINE_string(
'data_dir', default=FAKE_DATA_DIR,
help=('The directory where the ImageNet input data is stored. Please see'
' the README.md for the expected data format.'))
flags.DEFINE_string(
'model_dir', default=None,
help=('The directory where the model and training/evaluation summaries are'
' stored.'))
flags.DEFINE_integer(
'resnet_depth', default=50,
help=('Depth of ResNet model to use. Must be one of {18, 34, 50, 101, 152,'
' 200}. ResNet-18 and 34 use the pre-activation residual blocks'
' without bottleneck layers. The other models use pre-activation'
' bottleneck layers. Deeper models require more training time and'
' more memory and may require reducing --train_batch_size to prevent'
' running out of memory.'))
flags.DEFINE_string(
'mode', default='train_and_eval',
help='One of {"train_and_eval", "train", "eval"}.')
flags.DEFINE_integer(
'train_steps', default=112590,
help=('The number of steps to use for training. Default is 112590 steps'
' which is approximately 90 epochs at batch size 1024. This flag'
' should be adjusted according to the --train_batch_size flag.'))
flags.DEFINE_integer(
'train_batch_size', default=1024, help='Batch size for training.')
flags.DEFINE_integer(
'eval_batch_size', default=1024, help='Batch size for evaluation.')
flags.DEFINE_integer(
'num_train_images', default=1281167, help='Size of training data set.')
flags.DEFINE_integer(
'num_eval_images', default=50000, help='Size of evaluation data set.')
flags.DEFINE_integer(
'num_label_classes', default=1000, help='Number of classes, at least 2')
flags.DEFINE_integer(
'steps_per_eval', default=1251,
help=('Controls how often evaluation is performed. Since evaluation is'
' fairly expensive, it is advised to evaluate as infrequently as'
' possible (i.e. up to --train_steps, which evaluates the model only'
' after finishing the entire training regime).'))
flags.DEFINE_integer(
'eval_timeout',
default=None,
help='Maximum seconds between checkpoints before evaluation terminates.')
flags.DEFINE_bool(
'skip_host_call', default=False,
help=('Skip the host_call which is executed every training step. This is'
' generally used for generating training summaries (train loss,'
' learning rate, etc...). When --skip_host_call=false, there could'
' be a performance drop if host_call function is slow and cannot'
' keep up with the TPU-side computation.'))
flags.DEFINE_integer(
'iterations_per_loop', default=1251,
help=('Number of steps to run on TPU before outfeeding metrics to the CPU.'
' If the number of iterations in the loop would exceed the number of'
' train steps, the loop will exit before reaching'
' --iterations_per_loop. The larger this value is, the higher the'
' utilization on the TPU.'))
flags.DEFINE_integer(
'num_parallel_calls', default=64,
help=('Number of parallel threads in CPU for the input pipeline'))
flags.DEFINE_integer(
'num_cores', default=8,
help=('Number of TPU cores. For a single TPU device, this is 8 because each'
' TPU has 4 chips each with 2 cores.'))
flags.DEFINE_string(
'bigtable_project', None,
'The Cloud Bigtable project. If None, --gcp_project will be used.')
flags.DEFINE_string(
'bigtable_instance', None,
'The Cloud Bigtable instance to load data from.')
flags.DEFINE_string(
'bigtable_table', 'imagenet',
'The Cloud Bigtable table to load data from.')
flags.DEFINE_string(
'bigtable_train_prefix', 'train_',
'The prefix identifying training rows.')
flags.DEFINE_string(
'bigtable_eval_prefix', 'validation_',
'The prefix identifying evaluation rows.')
flags.DEFINE_string(
'bigtable_column_family', 'tfexample',
'The column family storing TFExamples.')
flags.DEFINE_string(
'bigtable_column_qualifier', 'example',
'The column name storing TFExamples.')
flags.DEFINE_string(
'data_format', default='channels_last',
help=('A flag to override the data format used in the model. The value'
' is either channels_first or channels_last. To run the network on'
' CPU or TPU, channels_last should be used. For GPU, channels_first'
' will improve performance.'))
# TODO(chrisying): remove this flag once --transpose_tpu_infeed flag is enabled
# by default for TPU
flags.DEFINE_bool(
'transpose_input', default=True,
help='Use TPU double transpose optimization')
flags.DEFINE_string(
'export_dir',
default=None,
help=('The directory where the exported SavedModel will be stored.'))
flags.DEFINE_bool(
'export_to_tpu', default=False,
help=('Whether to export additional metagraph with "serve, tpu" tags'
' in addition to "serve" only metagraph.'))
flags.DEFINE_string(
'precision', default='bfloat16',
help=('Precision to use; one of: {bfloat16, float32}'))
flags.DEFINE_float(
'base_learning_rate', default=0.1,
help=('Base learning rate when train batch size is 256.'))
flags.DEFINE_float(
'momentum', default=0.9,
help=('Momentum parameter used in the MomentumOptimizer.'))
flags.DEFINE_float(
'weight_decay', default=1e-4,
    help=('Weight decay coefficient for L2 regularization.'))
flags.DEFINE_float(
'label_smoothing', default=0.0,
help=('Label smoothing parameter used in the softmax_cross_entropy'))
flags.DEFINE_integer('log_step_count_steps', 64, 'The number of steps at '
'which the global step information is logged.')
flags.DEFINE_bool('enable_lars',
default=False,
help=('Enable LARS optimizer for large batch training.'))
flags.DEFINE_float('poly_rate', default=0.0,
help=('Set LARS/Poly learning rate.'))
flags.DEFINE_bool(
'use_cache', default=True, help=('Enable cache for training input.'))
flags.DEFINE_bool(
'use_async_checkpointing', default=False, help=('Enable async checkpoint'))
# Learning rate schedule
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
# The input tensor is in the range [0, 255]; the values below are the ImageNet
# channel means and standard deviations scaled to that range, used to normalize
# images to roughly zero mean and unit variance.
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
def learning_rate_schedule(current_epoch):
"""Handles linear scaling rule, gradual warmup, and LR decay.
The learning rate starts at 0, then it increases linearly per step.
After 5 epochs we reach the base learning rate (scaled to account
for batch size).
After 30, 60 and 80 epochs the learning rate is divided by 10.
  Training is stopped after 90 epochs (the default --train_steps), which keeps
  the schedule reproducible from run to run.
Args:
current_epoch: `Tensor` for current epoch.
Returns:
A scaled `Tensor` for current learning rate.
"""
scaled_lr = FLAGS.base_learning_rate * (FLAGS.train_batch_size / 256.0)
decay_rate = (scaled_lr * LR_SCHEDULE[0][0] *
current_epoch / LR_SCHEDULE[0][1])
for mult, start_epoch in LR_SCHEDULE:
decay_rate = tf.where(current_epoch < start_epoch,
decay_rate, scaled_lr * mult)
return decay_rate
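# Worked example (illustrative, assuming the defaults --base_learning_rate=0.1
# and --train_batch_size=1024): scaled_lr = 0.1 * 1024 / 256 = 0.4, so the
# schedule above yields approximately
#   epoch  2.5   -> 0.4 * 2.5 / 5 = 0.2   (linear warmup)
#   epochs 5-30  -> 0.4
#   epochs 30-60 -> 0.04
#   epochs 60-80 -> 0.004
#   epochs 80+   -> 0.0004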
def resnet_model_fn(features, labels, mode, params):
"""The model_fn for ResNet to be used with TPUEstimator.
Args:
features: `Tensor` of batched images.
labels: `Tensor` of labels for the data samples
mode: one of `tf.estimator.ModeKeys.{TRAIN,EVAL,PREDICT}`
params: `dict` of parameters passed to the model from the TPUEstimator,
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A `TPUEstimatorSpec` for the model
"""
if isinstance(features, dict):
features = features['feature']
# In most cases, the default data format NCHW instead of NHWC should be
# used for a significant performance boost on GPU/TPU. NHWC should be used
# only if the network needs to be run on CPU since the pooling operations
# are only supported on NHWC.
if FLAGS.data_format == 'channels_first':
assert not FLAGS.transpose_input # channels_first only for GPU
features = tf.transpose(features, [0, 3, 1, 2])
if FLAGS.transpose_input and mode != tf.estimator.ModeKeys.PREDICT:
features = tf.transpose(features, [3, 0, 1, 2]) # HWCN to NHWC
# Normalize the image to zero mean and unit variance.
features -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=features.dtype)
features /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=features.dtype)
# This nested function allows us to avoid duplicating the logic which
# builds the network, for different values of --precision.
def build_network():
network = resnet_model.resnet_v1(
resnet_depth=FLAGS.resnet_depth,
num_classes=FLAGS.num_label_classes,
data_format=FLAGS.data_format)
return network(
inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
if FLAGS.precision == 'bfloat16':
with tf.contrib.tpu.bfloat16_scope():
logits = build_network()
logits = tf.cast(logits, tf.float32)
elif FLAGS.precision == 'float32':
logits = build_network()
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
# If necessary, in the model_fn, use params['batch_size'] instead the batch
# size flags (--train_batch_size or --eval_batch_size).
batch_size = params['batch_size'] # pylint: disable=unused-variable
# Calculate loss, which includes softmax cross entropy and L2 regularization.
one_hot_labels = tf.one_hot(labels, FLAGS.num_label_classes)
cross_entropy = tf.losses.softmax_cross_entropy(
logits=logits,
onehot_labels=one_hot_labels,
label_smoothing=FLAGS.label_smoothing)
# Add weight decay to the loss for non-batch-normalization variables.
loss = cross_entropy + FLAGS.weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in tf.trainable_variables()
if 'batch_normalization' not in v.name])
host_call = None
if mode == tf.estimator.ModeKeys.TRAIN:
# Compute the current epoch and associated learning rate from global_step.
global_step = tf.train.get_global_step()
steps_per_epoch = FLAGS.num_train_images / FLAGS.train_batch_size
current_epoch = (tf.cast(global_step, tf.float32) /
steps_per_epoch)
# LARS is a large batch optimizer. LARS enables higher accuracy at batch 16K
# and larger batch sizes.
if FLAGS.train_batch_size >= 16384 and FLAGS.enable_lars:
learning_rate = 0.0
optimizer = lars_util.init_lars_optimizer(current_epoch)
else:
learning_rate = learning_rate_schedule(current_epoch)
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=FLAGS.momentum,
use_nesterov=True)
if FLAGS.use_tpu:
# When using TPU, wrap the optimizer with CrossShardOptimizer which
# handles synchronization details between different TPU cores. To the
# user, this should look like regular synchronous training.
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
# Batch normalization requires UPDATE_OPS to be added as a dependency to
# the train operation.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
if not FLAGS.skip_host_call:
def host_call_fn(gs, loss, lr, ce):
"""Training host call. Creates scalar summaries for training metrics.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the
model to the `metric_fn`, provide as part of the `host_call`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `host_call`.
Args:
          gs: `Tensor` with shape `[batch]` for the global_step.
loss: `Tensor` with shape `[batch]` for the training loss.
lr: `Tensor` with shape `[batch]` for the learning_rate.
ce: `Tensor` with shape `[batch]` for the current_epoch.
Returns:
List of summary ops to run on the CPU host.
"""
gs = gs[0]
      # Host call fns are executed FLAGS.iterations_per_loop times after one
      # TPU loop is finished. Setting max_queue to the same value as the number
      # of iterations makes the summary writer flush the data to storage only
      # once per loop.
with summary.create_file_writer(
FLAGS.model_dir, max_queue=FLAGS.iterations_per_loop).as_default():
with summary.always_record_summaries():
summary.scalar('loss', loss[0], step=gs)
summary.scalar('learning_rate', lr[0], step=gs)
summary.scalar('current_epoch', ce[0], step=gs)
return summary.all_summary_ops()
# To log the loss, current learning rate, and epoch for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
gs_t = tf.reshape(global_step, [1])
loss_t = tf.reshape(loss, [1])
lr_t = tf.reshape(learning_rate, [1])
ce_t = tf.reshape(current_epoch, [1])
host_call = (host_call_fn, [gs_t, loss_t, lr_t, ce_t])
else:
train_op = None
eval_metrics = None
if mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(labels, logits):
"""Evaluation metric function. Evaluates accuracy.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the model
to the `metric_fn`, provide as part of the `eval_metrics`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `eval_metrics`.
Args:
labels: `Tensor` with shape `[batch]`.
logits: `Tensor` with shape `[batch, num_classes]`.
Returns:
A dict of the metrics to return from evaluation.
"""
predictions = tf.argmax(logits, axis=1)
top_1_accuracy = tf.metrics.accuracy(labels, predictions)
in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
top_5_accuracy = tf.metrics.mean(in_top_5)
return {
'top_1_accuracy': top_1_accuracy,
'top_5_accuracy': top_5_accuracy,
}
eval_metrics = (metric_fn, [labels, logits])
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
host_call=host_call,
eval_metrics=eval_metrics)
def _verify_non_empty_string(value, field_name):
"""Ensures that a given proposed field value is a non-empty string.
Args:
value: proposed value for the field.
field_name: string name of the field, e.g. `project`.
Returns:
The given value, provided that it passed the checks.
Raises:
ValueError: the value is not a string, or is a blank string.
"""
if not isinstance(value, str):
raise ValueError(
'Bigtable parameter "%s" must be a string.' % field_name)
if not value:
raise ValueError(
'Bigtable parameter "%s" must be non-empty.' % field_name)
return value
def _select_tables_from_flags():
"""Construct training and evaluation Bigtable selections from flags.
Returns:
[training_selection, evaluation_selection]
"""
project = _verify_non_empty_string(
FLAGS.bigtable_project or FLAGS.gcp_project,
'project')
instance = _verify_non_empty_string(FLAGS.bigtable_instance, 'instance')
table = _verify_non_empty_string(FLAGS.bigtable_table, 'table')
train_prefix = _verify_non_empty_string(FLAGS.bigtable_train_prefix,
'train_prefix')
eval_prefix = _verify_non_empty_string(FLAGS.bigtable_eval_prefix,
'eval_prefix')
column_family = _verify_non_empty_string(FLAGS.bigtable_column_family,
'column_family')
column_qualifier = _verify_non_empty_string(FLAGS.bigtable_column_qualifier,
'column_qualifier')
return [
imagenet_input.BigtableSelection(
project=project,
instance=instance,
table=table,
prefix=p,
column_family=column_family,
column_qualifier=column_qualifier)
for p in (train_prefix, eval_prefix)
]
def main(unused_argv):
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu if (FLAGS.tpu or FLAGS.use_tpu) else '',
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
if FLAGS.use_async_checkpointing:
save_checkpoints_steps = None
else:
save_checkpoints_steps = max(100, FLAGS.iterations_per_loop)
config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps,
log_step_count_steps=FLAGS.log_step_count_steps,
session_config=tf.ConfigProto(
graph_options=tf.GraphOptions(
rewrite_options=rewriter_config_pb2.RewriterConfig(
disable_meta_optimizer=True))),
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_cores,
per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig
.PER_HOST_V2)) # pylint: disable=line-too-long
resnet_classifier = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=resnet_model_fn,
config=config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
export_to_tpu=FLAGS.export_to_tpu)
assert FLAGS.precision == 'bfloat16' or FLAGS.precision == 'float32', (
'Invalid value for --precision flag; must be bfloat16 or float32.')
tf.logging.info('Precision: %s', FLAGS.precision)
use_bfloat16 = FLAGS.precision == 'bfloat16'
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
if FLAGS.bigtable_instance:
tf.logging.info('Using Bigtable dataset, table %s', FLAGS.bigtable_table)
select_train, select_eval = _select_tables_from_flags()
imagenet_train, imagenet_eval = [imagenet_input.ImageNetBigtableInput(
is_training=is_training,
use_bfloat16=use_bfloat16,
transpose_input=FLAGS.transpose_input,
selection=selection) for (is_training, selection) in
[(True, select_train),
(False, select_eval)]]
else:
if FLAGS.data_dir == FAKE_DATA_DIR:
tf.logging.info('Using fake dataset.')
else:
tf.logging.info('Using dataset: %s', FLAGS.data_dir)
imagenet_train, imagenet_eval = [
imagenet_input.ImageNetInput(
is_training=is_training,
data_dir=FLAGS.data_dir,
transpose_input=FLAGS.transpose_input,
cache=FLAGS.use_cache and is_training,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=use_bfloat16) for is_training in [True, False]
]
steps_per_epoch = FLAGS.num_train_images // FLAGS.train_batch_size
eval_steps = FLAGS.num_eval_images // FLAGS.eval_batch_size
if FLAGS.mode == 'eval':
# Run evaluation when there's a new checkpoint
for ckpt in evaluation.checkpoints_iterator(
FLAGS.model_dir, timeout=FLAGS.eval_timeout):
tf.logging.info('Starting to evaluate.')
try:
start_timestamp = time.time() # This time will include compilation time
eval_results = resnet_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
checkpoint_path=ckpt)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info('Eval results: %s. Elapsed seconds: %d',
eval_results, elapsed_time)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
if current_step >= FLAGS.train_steps:
tf.logging.info(
'Evaluation finished after training step %d', current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint', ckpt)
else: # FLAGS.mode == 'train' or FLAGS.mode == 'train_and_eval'
current_step = estimator._load_global_step_from_checkpoint_dir(FLAGS.model_dir) # pylint: disable=protected-access,line-too-long
steps_per_epoch = FLAGS.num_train_images // FLAGS.train_batch_size
tf.logging.info('Training for %d steps (%.2f epochs in total). Current'
' step %d.',
FLAGS.train_steps,
FLAGS.train_steps / steps_per_epoch,
current_step)
start_timestamp = time.time() # This time will include compilation time
if FLAGS.mode == 'train':
hooks = []
if FLAGS.use_async_checkpointing:
hooks.append(
async_checkpoint.AsyncCheckpointSaverHook(
checkpoint_dir=FLAGS.model_dir,
save_steps=max(100, FLAGS.iterations_per_loop)))
resnet_classifier.train(
input_fn=imagenet_train.input_fn,
max_steps=FLAGS.train_steps,
hooks=hooks)
else:
assert FLAGS.mode == 'train_and_eval'
while current_step < FLAGS.train_steps:
# Train for up to steps_per_eval number of steps.
# At the end of training, a checkpoint will be written to --model_dir.
next_checkpoint = min(current_step + FLAGS.steps_per_eval,
FLAGS.train_steps)
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=next_checkpoint)
current_step = next_checkpoint
tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',
next_checkpoint, int(time.time() - start_timestamp))
# Evaluate the model on the most recent model in --model_dir.
# Since evaluation happens in batches of --eval_batch_size, some images
# may be excluded modulo the batch size. As long as the batch size is
# consistent, the evaluated images are also consistent.
tf.logging.info('Starting to evaluate.')
eval_results = resnet_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=FLAGS.num_eval_images // FLAGS.eval_batch_size)
tf.logging.info('Eval results at step %d: %s',
next_checkpoint, eval_results)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',
FLAGS.train_steps, elapsed_time)
if FLAGS.export_dir is not None:
    # The guide to serving an exported TensorFlow model is at:
# https://www.tensorflow.org/serving/serving_basic
tf.logging.info('Starting to export model.')
resnet_classifier.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=imagenet_input.image_serving_input_fn)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
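# Illustrative invocation (a sketch only; the script name, TPU name, bucket
# paths and batch size below are placeholders, not values taken from this file.
# The flag names match the FLAGS referenced in main() above):
#
#   python resnet_main.py \
#       --tpu=my-tpu --gcp_project=my-project --tpu_zone=us-central1-b \
#       --data_dir=gs://my-bucket/imagenet --model_dir=gs://my-bucket/resnet \
#       --train_batch_size=1024 --mode=train_and_eval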
| apache-2.0 |
eaudeweb/xhtml2pdf | setup_version.py | 61 | 1771 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Updates the version info.
"""
import time
import re
import cgi
VERSION = open("VERSION.txt", "r").read().strip()
BUILD = time.strftime("%Y-%m-%d")
FILES = [
"setup.py",
"setup_exe.py",
# "setup_egg.py",
"sx/pisa3/pisa_version.py",
"doc/pisa-en.html",
]
try:
HELP = cgi.escape(open("HELP.txt", "r").read(), 1)
except:
HELP = ""
HELP = "<!--HELP--><pre>" + HELP + "</pre><!--HELP-->"
rxversion = re.compile("VERSION{.*?}VERSION", re.MULTILINE|re.IGNORECASE|re.DOTALL)
rxbuild = re.compile("BUILD{.*?}BUILD", re.MULTILINE|re.IGNORECASE|re.DOTALL)
rxversionhtml = re.compile("\<\!--VERSION--\>.*?\<\!--VERSION--\>", re.MULTILINE|re.IGNORECASE|re.DOTALL)
rxhelphtml = re.compile("\<\!--HELP--\>.*?\<\!--HELP--\>", re.MULTILINE|re.IGNORECASE|re.DOTALL)
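# Rough sketch of the markers these patterns rewrite (the surrounding layout of
# the target files is assumed for illustration and is not shown in this snapshot;
# only the marker syntax itself comes from the regexes above):
#
#   VERSION = "VERSION{3.0.33}VERSION"            # rewritten via rxversion
#   BUILD = "BUILD{2010-06-16}BUILD"              # rewritten via rxbuild
#   <!--VERSION-->3.0.33<!--VERSION-->            # rewritten via rxversionhtml (HTML doc)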
for fname in FILES:
print "Update", fname, "..."
data = open(fname, "rb").read()
data = rxversion.sub("VERSION{" + VERSION + "}VERSION", data)
data = rxversionhtml.sub("<!--VERSION-->" + VERSION + "<!--VERSION-->", data)
data = rxbuild.sub("BUILD{" + BUILD + "}BUILD", data)
data = rxhelphtml.sub(HELP, data)
open(fname, "wb").write(data)
| apache-2.0 |
vlachoudis/sl4a | python/src/Lib/encodings/cp850.py | 593 | 34361 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP850.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp850',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
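# Illustrative round trip through this codec (a sketch, not part of the generated
# module; it relies only on the tables defined below, e.g. byte 0x80 <-> U+00C7):
#
#   import codecs
#   assert codecs.decode('\x80', 'cp850') == u'\xc7'  # LATIN CAPITAL LETTER C WITH CEDILLA
#   assert codecs.encode(u'\xc7', 'cp850') == '\x80'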
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00f0, # LATIN SMALL LETTER ETH
0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x00fe, # LATIN SMALL LETTER THORN
0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2017, # DOUBLE LOW LINE
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd7' # 0x009e -> MULTIPLICATION SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa9' # 0x00b8 -> COPYRIGHT SIGN
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0x00bd -> CENT SIGN
u'\xa5' # 0x00be -> YEN SIGN
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\xf0' # 0x00d0 -> LATIN SMALL LETTER ETH
u'\xd0' # 0x00d1 -> LATIN CAPITAL LETTER ETH
u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\u0131' # 0x00d5 -> LATIN SMALL LETTER DOTLESS I
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\xa6' # 0x00dd -> BROKEN BAR
u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\xfe' # 0x00e7 -> LATIN SMALL LETTER THORN
u'\xde' # 0x00e8 -> LATIN CAPITAL LETTER THORN
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
u'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xaf' # 0x00ee -> MACRON
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2017' # 0x00f2 -> DOUBLE LOW LINE
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x00bd, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a5: 0x00be, # YEN SIGN
0x00a6: 0x00dd, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x00b8, # COPYRIGHT SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00af: 0x00ee, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00f7, # CEDILLA
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d0: 0x00d1, # LATIN CAPITAL LETTER ETH
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00de: 0x00e8, # LATIN CAPITAL LETTER THORN
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f0: 0x00d0, # LATIN SMALL LETTER ETH
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x00fe: 0x00e7, # LATIN SMALL LETTER THORN
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0131: 0x00d5, # LATIN SMALL LETTER DOTLESS I
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x2017: 0x00f2, # DOUBLE LOW LINE
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| apache-2.0 |
fredrikw/scipy | scipy/weave/examples/cast_copy_transpose.py | 100 | 5687 | """ Cast Copy Transpose is used in numpy LinearAlgebra.py to convert
C-ordered arrays to Fortran-ordered arrays before calling Fortran
functions. A couple of C implementations are provided here that
show modest speed improvements. One is an "inplace" transpose that
does an in-memory transpose of an array's elements. This is the
fastest approach and is beneficial if you don't need to keep the
original array.
"""
# C:\home\ej\wrk\scipy\compiler\examples>python cast_copy_transpose.py
# Cast/Copy/Transposing (150,150)array 1 times
# speed in python: 0.870999932289
# speed in c: 0.25
# speed up: 3.48
# inplace transpose c: 0.129999995232
# speed up: 6.70
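# For reference, the plain NumPy equivalent of the operation being accelerated is
# roughly (a sketch; float32 is just an example target dtype):
#
#   import numpy as np
#   b = np.transpose(a).astype(np.float32)   # transpose view, then cast + copy
#
# which is essentially what _castCopyAndTranspose below does with copy.copy().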
from __future__ import absolute_import, print_function
import numpy
from numpy import *
import sys
sys.path.insert(0,'..')
import scipy.weave.inline_tools as inline_tools
import scipy.weave.c_spec as c_spec
from scipy.weave.converters import blitz as cblitz
def _cast_copy_transpose(type,a_2d):
assert(len(shape(a_2d)) == 2)
new_array = zeros(shape(a_2d),type)
code = """
for(int i = 0; i < Na_2d[0]; i++)
for(int j = 0; j < Na_2d[1]; j++)
new_array(i,j) = a_2d(j,i);
"""
inline_tools.inline(code,['new_array','a_2d'],
type_converters=cblitz,
compiler='gcc',
verbose=1)
return new_array
def _cast_copy_transpose2(type,a_2d):
assert(len(shape(a_2d)) == 2)
new_array = zeros(shape(a_2d),type)
code = """
const int I = Na_2d[0];
const int J = Na_2d[1];
for(int i = 0; i < I; i++)
{
int new_off = i*J;
int old_off = i;
for(int j = 0; j < J; j++)
{
new_array[new_off++] = a_2d[old_off];
old_off += I;
}
}
"""
inline_tools.inline(code,['new_array','a_2d'],compiler='gcc',verbose=1)
return new_array
def _inplace_transpose(a_2d):
assert(len(shape(a_2d)) == 2)
numeric_type = c_spec.num_to_c_types[a_2d.dtype.char]
code = """
%s temp;
for(int i = 0; i < Na_2d[0]; i++)
for(int j = 0; j < Na_2d[1]; j++)
{
temp = a_2d(i,j);
a_2d(i,j) = a_2d(j,i);
a_2d(j,i) = temp;
}
""" % numeric_type
inline_tools.inline(code,['a_2d'],
type_converters=cblitz,
compiler='gcc',
extra_compile_args=['-funroll-all-loops'],
verbose=2)
return a_2d
#assert(len(shape(a_2d)) == 2)
#type = a_2d.typecode()
#new_array = zeros(shape(a_2d),type)
##trans_a_2d = transpose(a_2d)
#numeric_type = c_spec.num_to_c_types[type]
#code = """
# for(int i = 0; i < Na_2d[0]; i++)
# for(int j = 0; j < Na_2d[1]; j++)
# new_array(i,j) = (%s) a_2d(j,i);
# """ % numeric_type
#inline_tools.inline(code,['new_array','a_2d'],
# type_converters = cblitz,
# compiler='gcc',
# verbose = 1)
#return new_array
def cast_copy_transpose(type,*arrays):
results = []
for a in arrays:
results.append(_cast_copy_transpose(type,a))
if len(results) == 1:
return results[0]
else:
return results
def cast_copy_transpose2(type,*arrays):
results = []
for a in arrays:
results.append(_cast_copy_transpose2(type,a))
if len(results) == 1:
return results[0]
else:
return results
def inplace_cast_copy_transpose(*arrays):
results = []
for a in arrays:
results.append(_inplace_transpose(a))
if len(results) == 1:
return results[0]
else:
return results
def _castCopyAndTranspose(type, *arrays):
cast_arrays = ()
import copy
for a in arrays:
if a.dtype == numpy.dtype(type):
cast_arrays = cast_arrays + (copy.copy(numpy.transpose(a)),)
else:
cast_arrays = cast_arrays + (copy.copy(
numpy.transpose(a).astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
import time
def compare(m,n):
a = ones((n,n),float64)
type = float32
print('Cast/Copy/Transposing (%d,%d)array %d times' % (n,n,m))
t1 = time.time()
for i in range(m):
for i in range(n):
b = _castCopyAndTranspose(type,a)
t2 = time.time()
py = (t2-t1)
print(' speed in python:', (t2 - t1)/m)
# load into cache
b = cast_copy_transpose(type,a)
t1 = time.time()
for i in range(m):
for i in range(n):
b = cast_copy_transpose(type,a)
t2 = time.time()
print(' speed in c (blitz):',(t2 - t1) / m)
print(' speed up (blitz): %3.2f' % (py/(t2-t1)))
# load into cache
b = cast_copy_transpose2(type,a)
t1 = time.time()
for i in range(m):
for i in range(n):
b = cast_copy_transpose2(type,a)
t2 = time.time()
print(' speed in c (pointers):',(t2 - t1) / m)
print(' speed up (pointers): %3.2f' % (py/(t2-t1)))
    # in-place transpose
b = _inplace_transpose(a)
t1 = time.time()
for i in range(m):
for i in range(n):
b = _inplace_transpose(a)
t2 = time.time()
print(' inplace transpose c:',(t2 - t1) / m)
print(' speed up: %3.2f' % (py/(t2-t1)))
if __name__ == "__main__":
m,n = 1,500
compare(m,n)
| bsd-3-clause |
gmalmquist/pants | tests/python/pants_test/backend/codegen/tasks/test_ragel_gen.py | 12 | 2331 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.codegen.targets.java_ragel_library import JavaRagelLibrary
from pants.backend.codegen.tasks.ragel_gen import RagelGen, calculate_genfile
from pants.util.contextutil import temporary_file
from pants.util.dirutil import safe_mkdtemp
from pants_test.tasks.task_test_base import TaskTestBase
ragel_file_contents = dedent("""
package com.example.atoi;
%%{
machine parser;
action minus {
negative = true;
}
action digit {
val *= 10;
val += fc - '0';
}
main := ( '-'@minus )? ( [0-9] @digit ) + '\0';
}%%
public class Parser {
%% write data;
public static int parse(CharSequence input) {
StringBuilder builder = new StringBuilder(input);
builder.append('\0');
char[] data = builder.toString().toCharArray();
int p = 0;
int pe = data.length;
int eof = pe;
int cs;
boolean negative = false;
int val = 0;
%% write init;
%% write exec;
if (negative)
return -val;
else
return val;
}
}
""")
class RagelGenTest(TaskTestBase):
@classmethod
def task_type(cls):
return RagelGen
def test_ragel_gen(self):
self.create_file(relpath='test_ragel_gen/atoi.rl', contents=ragel_file_contents)
target = self.make_target(spec='test_ragel_gen:atoi',
target_type=JavaRagelLibrary,
sources=['atoi.rl'])
task = self.create_task(self.context(target_roots=[target]))
target_workdir = safe_mkdtemp(dir=self.test_workdir)
task.execute_codegen(target, target_workdir)
generated_files = []
for root, _, files in os.walk(target_workdir):
generated_files.extend(os.path.relpath(os.path.join(root, f), target_workdir) for f in files)
self.assertEqual(['com/example/atoi/Parser.java'], generated_files)
def test_smoke(self):
with temporary_file() as fp:
fp.write(ragel_file_contents)
fp.flush()
self.assertEquals(calculate_genfile(fp.name), 'com/example/atoi/Parser.java')
| apache-2.0 |
yinchunlong/abelkhan-1 | ext/c++/thirdpart/c++/boost/tools/build/src/build/feature.py | 11 | 33759 | # Status: ported, except for unit tests.
# Base revision: 64488
#
# Copyright 2001, 2002, 2003 Dave Abrahams
# Copyright 2002, 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import re
from b2.util import utility, bjam_signature, is_iterable_typed
import b2.util.set
from b2.util.utility import add_grist, get_grist, ungrist, replace_grist, to_seq
from b2.exceptions import *
__re_split_subfeatures = re.compile ('<(.*):(.*)>')
__re_no_hyphen = re.compile ('^([^:]+)$')
__re_slash_or_backslash = re.compile (r'[\\/]')
class Feature(object):
# Map from string attribute names to integers bit flags.
# This will be initialized after declaration of the class.
_attribute_name_to_integer = {}
def __init__(self, name, values, attributes):
assert isinstance(name, basestring)
assert is_iterable_typed(values, basestring)
assert is_iterable_typed(attributes, basestring)
self._name = name
self._values = values
self._default = None
self._attributes = 0
for a in attributes:
self._attributes = self._attributes | Feature._attribute_name_to_integer[a]
self._attributes_string_list = attributes
self._subfeatures = []
self._parent = None
def name(self):
return self._name
def values(self):
return self._values
def add_values(self, values):
assert is_iterable_typed(values, basestring)
self._values.extend(values)
def attributes(self):
return self._attributes
def set_default(self, value):
assert isinstance(value, basestring)
for attr in ('free', 'optional'):
if getattr(self, attr)():
get_manager().errors()('"{}" feature "<{}>" cannot have a default value.'
.format(attr, self._name))
self._default = value
def default(self):
return self._default
# FIXME: remove when we fully move to using classes for features/properties
def attributes_string_list(self):
return self._attributes_string_list
def subfeatures(self):
return self._subfeatures
def add_subfeature(self, name):
assert isinstance(name, Feature)
self._subfeatures.append(name)
def parent(self):
"""For subfeatures, return pair of (parent_feature, value).
Value may be None if this subfeature is not specific to any
value of the parent feature.
"""
return self._parent
def set_parent(self, feature, value):
assert isinstance(feature, Feature)
assert isinstance(value, basestring)
self._parent = (feature, value)
def __str__(self):
return self._name
def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __all_attributes, __all_features, __implicit_features, __composite_properties
global __features_with_attributes, __subfeature_from_value, __all_top_features, __free_features
global __all_subfeatures
# The list with all attribute names.
__all_attributes = [ 'implicit',
'composite',
'optional',
'symmetric',
'free',
'incidental',
'path',
'dependency',
'propagated',
'link-incompatible',
'subfeature',
'order-sensitive'
]
i = 1
for a in __all_attributes:
setattr(Feature, a.upper(), i)
Feature._attribute_name_to_integer[a] = i
def probe(self, flag=i):
return getattr(self, "_attributes") & flag
setattr(Feature, a.replace("-", "_"), probe)
i = i << 1
# A map containing all features. The key is the feature name.
# The value is an instance of Feature class.
__all_features = {}
# All non-subfeatures.
__all_top_features = []
    # Maps values to the corresponding implicit feature
__implicit_features = {}
# A map containing all composite properties. The key is a Property instance,
# and the value is a list of Property instances
__composite_properties = {}
__features_with_attributes = {}
for attribute in __all_attributes:
__features_with_attributes [attribute] = []
# Maps a value to the corresponding subfeature name.
__subfeature_from_value = {}
# All free features
__free_features = []
__all_subfeatures = []
reset ()
def enumerate ():
""" Returns an iterator to the features map.
"""
return __all_features.iteritems ()
def get(name):
"""Return the Feature instance for the specified name.
Throws if no feature by such name exists
"""
assert isinstance(name, basestring)
return __all_features[name]
# FIXME: prepare-test/finish-test?
@bjam_signature((["name"], ["values", "*"], ["attributes", "*"]))
def feature (name, values, attributes = []):
""" Declares a new feature with the given name, values, and attributes.
name: the feature name
values: a sequence of the allowable values - may be extended later with feature.extend
attributes: a sequence of the feature's attributes (e.g. implicit, free, propagated, ...)
"""
__validate_feature_attributes (name, attributes)
feature = Feature(name, [], attributes)
__all_features[name] = feature
# Temporary measure while we have not fully moved from 'gristed strings'
__all_features["<" + name + ">"] = feature
for attribute in attributes:
__features_with_attributes [attribute].append (name)
name = add_grist(name)
if 'subfeature' in attributes:
__all_subfeatures.append(name)
else:
__all_top_features.append(feature)
extend (name, values)
# FIXME: why his is needed.
if 'free' in attributes:
__free_features.append (name)
return feature
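# Illustrative declarations (a sketch mirroring how Boost.Build's own modules use
# this API; the feature names, values and attributes here are examples and are not
# defined in this module):
#
#   feature('optimization', ['off', 'speed', 'space'], ['propagated'])
#   feature('toolset', [], ['implicit', 'propagated', 'symmetric'])
#   extend('toolset', ['gcc', 'msvc'])   # see extend() further below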
@bjam_signature((["feature"], ["value"]))
def set_default (feature, value):
""" Sets the default value of the given feature, overriding any previous default.
feature: the name of the feature
value: the default value to assign
"""
f = __all_features[feature]
attributes = f.attributes()
bad_attribute = None
if attributes & Feature.FREE:
bad_attribute = "free"
elif attributes & Feature.OPTIONAL:
bad_attribute = "optional"
if bad_attribute:
raise InvalidValue ("%s property %s cannot have a default" % (bad_attribute, feature.name()))
if not value in f.values():
raise InvalidValue ("The specified default value, '%s' is invalid.\n" % value + "allowed values are: %s" % f.values())
f.set_default(value)
def defaults(features):
""" Returns the default property values for the given features.
"""
assert is_iterable_typed(features, Feature)
# FIXME: should merge feature and property modules.
from . import property
result = []
for f in features:
if not f.free() and not f.optional() and f.default():
result.append(property.Property(f, f.default()))
return result
def valid (names):
""" Returns true iff all elements of names are valid features.
"""
if isinstance(names, str):
names = [names]
assert is_iterable_typed(names, basestring)
return all(name in __all_features for name in names)
def attributes (feature):
""" Returns the attributes of the given feature.
"""
assert isinstance(feature, basestring)
return __all_features[feature].attributes_string_list()
def values (feature):
""" Return the values of the given feature.
"""
assert isinstance(feature, basestring)
validate_feature (feature)
return __all_features[feature].values()
def is_implicit_value (value_string):
""" Returns true iff 'value_string' is a value_string
of an implicit feature.
"""
assert isinstance(value_string, basestring)
if __implicit_features.has_key(value_string):
return __implicit_features[value_string]
v = value_string.split('-')
if not __implicit_features.has_key(v[0]):
return False
feature = __implicit_features[v[0]]
for subvalue in (v[1:]):
if not __find_implied_subfeature(feature, subvalue, v[0]):
return False
return True
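# Illustrative behaviour (assuming the sketched declarations after feature() above,
# where 'toolset' is an implicit feature extended with a 'gcc' value):
#
#   is_implicit_value('gcc')            # truthy -- 'gcc' maps to the <toolset> feature
#   is_implicit_value('no-such-value')  # False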
def implied_feature (implicit_value):
""" Returns the implicit feature associated with the given implicit value.
"""
assert isinstance(implicit_value, basestring)
components = implicit_value.split('-')
if not __implicit_features.has_key(components[0]):
raise InvalidValue ("'%s' is not a value of an implicit feature" % implicit_value)
return __implicit_features[components[0]]
def __find_implied_subfeature (feature, subvalue, value_string):
assert isinstance(feature, Feature)
assert isinstance(subvalue, basestring)
assert isinstance(value_string, basestring)
try:
return __subfeature_from_value[feature][value_string][subvalue]
except KeyError:
return None
# Given a feature and a value of one of its subfeatures, find the name
# of the subfeature. If value-string is supplied, looks for implied
# subfeatures that are specific to that value of feature
# feature # The main feature name
# subvalue # The value of one of its subfeatures
# value-string # The value of the main feature
def implied_subfeature (feature, subvalue, value_string):
assert isinstance(feature, Feature)
assert isinstance(subvalue, basestring)
assert isinstance(value_string, basestring)
result = __find_implied_subfeature (feature, subvalue, value_string)
if not result:
raise InvalidValue ("'%s' is not a known subfeature value of '%s%s'" % (subvalue, feature, value_string))
return result
def validate_feature (name):
""" Checks if all name is a valid feature. Otherwise, raises an exception.
"""
assert isinstance(name, basestring)
if not __all_features.has_key(name):
raise InvalidFeature ("'%s' is not a valid feature name" % name)
else:
return __all_features[name]
# Uses Property
def __expand_subfeatures_aux (property_, dont_validate = False):
""" Helper for expand_subfeatures.
Given a feature and value, or just a value corresponding to an
implicit feature, returns a property set consisting of all component
subfeatures and their values. For example:
expand_subfeatures <toolset>gcc-2.95.2-linux-x86
-> <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
equivalent to:
expand_subfeatures gcc-2.95.2-linux-x86
feature: The name of the feature, or empty if value corresponds to an implicit property
value: The value of the feature.
dont_validate: If True, no validation of value string will be done.
"""
from . import property # no __debug__ since Property is used elsewhere
assert isinstance(property_, property.Property)
assert isinstance(dont_validate, int) # matches bools
f = property_.feature()
v = property_.value()
if not dont_validate:
validate_value_string(f, v)
components = v.split ("-")
v = components[0]
result = [property.Property(f, components[0])]
subvalues = components[1:]
while len(subvalues) > 0:
subvalue = subvalues [0] # pop the head off of subvalues
subvalues = subvalues [1:]
subfeature = __find_implied_subfeature (f, subvalue, v)
# If no subfeature was found, reconstitute the value string and use that
if not subfeature:
return [property.Property(f, '-'.join(components))]
result.append(property.Property(subfeature, subvalue))
return result
def expand_subfeatures(properties, dont_validate = False):
"""
Make all elements of properties corresponding to implicit features
explicit, and express all subfeature values as separate properties
in their own right. For example, the property
gcc-2.95.2-linux-x86
might expand to
<toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
properties: A sequence with elements of the form
<feature>value-string or just value-string in the
case of implicit features.
    dont_validate: If True, no validation of value string will be done.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
assert isinstance(dont_validate, int) # matches bools
result = []
for p in properties:
# Don't expand subfeatures in subfeatures
if p.feature().subfeature():
result.append (p)
else:
result.extend(__expand_subfeatures_aux (p, dont_validate))
return result
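# Illustrative usage sketch (not part of the original module). Assuming a
# 'toolset' feature with gcc and its version/os/cpu subfeatures has already
# been declared, the expansion described in the docstring looks roughly like:
#
#   props = [property.create_from_string('<toolset>gcc-2.95.2-linux-x86')]
#   expand_subfeatures(props)
#   # -> <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86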
# rule extend was defined as below:
# Can be called three ways:
#
# 1. extend feature : values *
# 2. extend <feature> subfeature : values *
# 3. extend <feature>value-string subfeature : values *
#
# * Form 1 adds the given values to the given feature
# * Forms 2 and 3 add subfeature values to the given feature
# * Form 3 adds the subfeature values as specific to the given
# property value-string.
#
#rule extend ( feature-or-property subfeature ? : values * )
#
# Now, the specific rule must be called, depending on the desired operation:
# extend_feature
# extend_subfeature
def extend (name, values):
""" Adds the given values to the given feature.
"""
assert isinstance(name, basestring)
assert is_iterable_typed(values, basestring)
name = add_grist (name)
__validate_feature (name)
feature = __all_features [name]
if feature.implicit():
for v in values:
if __implicit_features.has_key(v):
raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v]))
__implicit_features[v] = feature
if values and not feature.values() and not(feature.free() or feature.optional()):
# This is the first value specified for this feature,
# take it as default value
feature.set_default(values[0])
feature.add_values(values)
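# Illustrative usage sketch (not part of the original module); the feature
# name and value below are hypothetical:
#
#   extend('toolset', ['msvc'])   # 'msvc' becomes a valid value of <toolset>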
def validate_value_string (f, value_string):
""" Checks that value-string is a valid value-string for the given feature.
"""
assert isinstance(f, Feature)
assert isinstance(value_string, basestring)
if f.free() or value_string in f.values():
return
values = [value_string]
if f.subfeatures():
if not value_string in f.values() and \
not value_string in f.subfeatures():
values = value_string.split('-')
# An empty value is allowed for optional features
if not values[0] in f.values() and \
(values[0] or not f.optional()):
raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], f.name(), f.values()))
for v in values [1:]:
# this will validate any subfeature values in value-string
implied_subfeature(f, v, values[0])
""" Extends the given subfeature with the subvalues. If the optional
value-string is provided, the subvalues are only valid for the given
value of the feature. Thus, you could say that
    <target-platform>mingw is specific to <toolset>gcc-2.95.2 as follows:
extend-subfeature toolset gcc-2.95.2 : target-platform : mingw ;
feature: The feature whose subfeature is being extended.
value-string: If supplied, specifies a specific value of the
main feature for which the new subfeature values
are valid.
subfeature: The name of the subfeature.
subvalues: The additional values of the subfeature being defined.
"""
def extend_subfeature (feature_name, value_string, subfeature_name, subvalues):
assert isinstance(feature_name, basestring)
assert isinstance(value_string, basestring)
assert isinstance(subfeature_name, basestring)
assert is_iterable_typed(subvalues, basestring)
feature = validate_feature(feature_name)
if value_string:
validate_value_string(feature, value_string)
subfeature_name = feature_name + '-' + __get_subfeature_name (subfeature_name, value_string)
    extend(subfeature_name, subvalues)
subfeature = __all_features[subfeature_name]
if value_string == None: value_string = ''
if not __subfeature_from_value.has_key(feature):
__subfeature_from_value [feature] = {}
if not __subfeature_from_value[feature].has_key(value_string):
__subfeature_from_value [feature][value_string] = {}
for subvalue in subvalues:
__subfeature_from_value [feature][value_string][subvalue] = subfeature
@bjam_signature((["feature_name", "value_string", "?"], ["subfeature"],
["subvalues", "*"], ["attributes", "*"]))
def subfeature (feature_name, value_string, subfeature, subvalues, attributes = []):
""" Declares a subfeature.
feature_name: Root feature that is not a subfeature.
value_string: An optional value-string specifying which feature or
subfeature values this subfeature is specific to,
if any.
subfeature: The name of the subfeature being declared.
subvalues: The allowed values of this subfeature.
attributes: The attributes of the subfeature.
"""
parent_feature = validate_feature (feature_name)
# Add grist to the subfeature name if a value-string was supplied
subfeature_name = __get_subfeature_name (subfeature, value_string)
if subfeature_name in __all_features[feature_name].subfeatures():
message = "'%s' already declared as a subfeature of '%s'" % (subfeature, feature_name)
message += " specific to '%s'" % value_string
raise BaseException (message)
# First declare the subfeature as a feature in its own right
f = feature (feature_name + '-' + subfeature_name, subvalues, attributes + ['subfeature'])
f.set_parent(parent_feature, value_string)
parent_feature.add_subfeature(f)
# Now make sure the subfeature values are known.
extend_subfeature (feature_name, value_string, subfeature, subvalues)
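# Illustrative usage sketch (not part of the original module); the names and
# values are hypothetical:
#
#   subfeature('toolset', 'gcc', 'version', ['2.95.2', '3.0.4'], ['propagated'])
#   # declares <toolset-gcc:version>, valid only together with <toolset>gcc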
@bjam_signature((["composite_property_s"], ["component_properties_s", "*"]))
def compose (composite_property_s, component_properties_s):
""" Sets the components of the given composite property.
All parameters are <feature>value strings
"""
from . import property
component_properties_s = to_seq (component_properties_s)
composite_property = property.create_from_string(composite_property_s)
f = composite_property.feature()
if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property):
component_properties = component_properties_s
else:
component_properties = [property.create_from_string(p) for p in component_properties_s]
if not f.composite():
raise BaseException ("'%s' is not a composite feature" % f)
    if __composite_properties.has_key(composite_property):
raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property])))
if composite_property in component_properties:
raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property)
__composite_properties[composite_property] = component_properties
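# Illustrative usage sketch (not part of the original module), using the usual
# Boost.Build debug variant as a hypothetical example:
#
#   compose('<variant>debug',
#           ['<optimization>off', '<debug-symbols>on', '<inlining>off'])
#   # expand_composites() will later add these components wherever
#   # <variant>debug appears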
def expand_composite(property_):
if __debug__:
from .property import Property
assert isinstance(property_, Property)
result = [ property_ ]
if __composite_properties.has_key(property_):
for p in __composite_properties[property_]:
result.extend(expand_composite(p))
return result
@bjam_signature((['feature'], ['properties', '*']))
def get_values (feature, properties):
""" Returns all values of the given feature specified by the given property set.
"""
if feature[0] != '<':
feature = '<' + feature + '>'
result = []
for p in properties:
if get_grist (p) == feature:
result.append (replace_grist (p, ''))
return result
def free_features ():
""" Returns all free features.
"""
return __free_features
def expand_composites (properties):
""" Expand all composite properties in the set so that all components
are explicitly expressed.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
explicit_features = set(p.feature() for p in properties)
result = []
# now expand composite features
for p in properties:
expanded = expand_composite(p)
for x in expanded:
if not x in result:
f = x.feature()
if f.free():
result.append (x)
elif not x in properties: # x is the result of expansion
if not f in explicit_features: # not explicitly-specified
if any(r.feature() == f for r in result):
raise FeatureConflict(
"expansions of composite features result in "
"conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" %
(f.name(), [r.value() for r in result if r.feature() == f] + [x.value()], p))
else:
result.append (x)
elif any(r.feature() == f for r in result):
raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n"
"existing values: '%s'\nvalue from expanding '%s': '%s'" % (f,
[r.value() for r in result if r.feature() == f], p, x.value()))
else:
result.append (x)
return result
# Uses Property
def is_subfeature_of (parent_property, f):
""" Return true iff f is an ordinary subfeature of the parent_property's
feature, or if f is a subfeature of the parent_property's feature
specific to the parent_property's value.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(f, Feature)
if not f.subfeature():
return False
p = f.parent()
if not p:
return False
parent_feature = p[0]
parent_value = p[1]
if parent_feature != parent_property.feature():
return False
if parent_value and parent_value != parent_property.value():
return False
return True
def __is_subproperty_of (parent_property, p):
""" As is_subfeature_of, for subproperties.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(p, Property)
return is_subfeature_of (parent_property, p.feature())
# Returns true iff the subvalue is valid for the feature. When the
# optional value-string is provided, returns true iff the subvalues
# are valid for the given value of the feature.
def is_subvalue(feature, value_string, subfeature, subvalue):
assert isinstance(feature, basestring)
assert isinstance(value_string, basestring)
assert isinstance(subfeature, basestring)
assert isinstance(subvalue, basestring)
if not value_string:
value_string = ''
try:
return __subfeature_from_value[feature][value_string][subvalue] == subfeature
except KeyError:
return False
# Uses Property
def expand (properties):
""" Given a property set which may consist of composite and implicit
properties and combined subfeature values, returns an expanded,
normalized property set with all implicit features expressed
explicitly, all subfeature values individually expressed, and all
components of composite properties expanded. Non-free features
directly expressed in the input properties cause any values of
those features due to composite feature expansion to be dropped. If
two values of a given non-free feature are directly expressed in the
input, an error is issued.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
expanded = expand_subfeatures(properties)
return expand_composites (expanded)
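# Illustrative usage sketch (not part of the original module): expand() is just
# expand_subfeatures() followed by expand_composites(), e.g. with hypothetical
# properties:
#
#   props = [property.create_from_string(s)
#            for s in ['<toolset>gcc-2.95.2', '<variant>debug']]
#   full = expand(props)   # subfeatures split out, composite components added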
# Accepts list of Property objects
def add_defaults (properties):
""" Given a set of properties, add default values for features not
represented in the set.
        Note: if there's an ordinary feature F1 and a composite feature
        F2 which includes some value for F1, and both features have default values,
        then the default value of F1 will be added, not the value in F2. This might
        not be the right idea: consider
feature variant : debug ... ;
<variant>debug : .... <runtime-debugging>on
feature <runtime-debugging> : off on ;
Here, when adding default for an empty property set, we'll get
        <variant>debug <runtime-debugging>off
and that's kind of strange.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
result = [x for x in properties]
handled_features = set()
for p in properties:
        # We don't add defaults for conditional properties. We don't want
        # <variant>debug:<define>DEBUG to be taken as a specified value for <variant>
if not p.condition():
handled_features.add(p.feature())
missing_top = [f for f in __all_top_features if not f in handled_features]
more = defaults(missing_top)
result.extend(more)
for p in more:
handled_features.add(p.feature())
# Add defaults for subfeatures of features which are present
for p in result[:]:
        more = defaults([s for s in p.feature().subfeatures() if s not in handled_features])
for p in more:
handled_features.add(p.feature())
result.extend(more)
return result
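# Illustrative usage sketch (not part of the original module): assuming
# hypothetical features <optimization> and <inlining> whose defaults are 'off',
#
#   add_defaults([property.create_from_string('<optimization>speed')])
#
# keeps <optimization>speed and appends <inlining>off together with the
# defaults of every other declared top-level feature.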
def minimize (properties):
""" Given an expanded property set, eliminate all redundancy: properties
which are elements of other (composite) properties in the set will
be eliminated. Non-symmetric properties equal to default values will be
        eliminated, unless they override a value from some composite property.
Implicit properties will be expressed without feature
grist, and sub-property values will be expressed as elements joined
to the corresponding main property.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
# remove properties implied by composite features
components = []
for property in properties:
if __composite_properties.has_key (property):
components.extend(__composite_properties[property])
properties = b2.util.set.difference (properties, components)
# handle subfeatures and implicit features
# move subfeatures to the end of the list
properties = [p for p in properties if not p.feature().subfeature()] +\
[p for p in properties if p.feature().subfeature()]
result = []
while properties:
p = properties[0]
f = p.feature()
# locate all subproperties of $(x[1]) in the property set
subproperties = __select_subproperties (p, properties)
if subproperties:
# reconstitute the joined property name
subproperties.sort ()
joined = b2.build.property.Property(p.feature(), p.value() + '-' + '-'.join ([sp.value() for sp in subproperties]))
result.append(joined)
properties = b2.util.set.difference(properties[1:], subproperties)
else:
# eliminate properties whose value is equal to feature's
# default and which are not symmetric and which do not
# contradict values implied by composite properties.
# since all component properties of composites in the set
# have been eliminated, any remaining property whose
# feature is the same as a component of a composite in the
# set must have a non-redundant value.
if p.value() != f.default() or f.symmetric():
result.append (p)
#\
#or get_grist (fullp) in get_grist (components):
# FIXME: restore above
properties = properties[1:]
return result
def split (properties):
""" Given a property-set of the form
v1/v2/...vN-1/<fN>vN/<fN+1>vN+1/...<fM>vM
Returns
v1 v2 ... vN-1 <fN>vN <fN+1>vN+1 ... <fM>vM
Note that vN...vM may contain slashes. This is resilient to the
substitution of backslashes for slashes, since Jam, unbidden,
sometimes swaps slash direction on NT.
"""
assert isinstance(properties, basestring)
def split_one (properties):
pieces = re.split (__re_slash_or_backslash, properties)
result = []
for x in pieces:
if not get_grist (x) and len (result) > 0 and get_grist (result [-1]):
result = result [0:-1] + [ result [-1] + '/' + x ]
else:
result.append (x)
return result
if isinstance (properties, str):
return split_one (properties)
result = []
for p in properties:
result += split_one (p)
return result
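# Illustrative usage sketch (not part of the original module):
#
#   split('gcc/<variant>debug/<include>a/b')
#   # -> ['gcc', '<variant>debug', '<include>a/b']
#
# the trailing 'b' has no grist, so it is re-joined to the preceding
# gristed element.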
def compress_subproperties (properties):
""" Combine all subproperties into their parent properties
Requires: for every subproperty, there is a parent property. All
features are explicitly expressed.
This rule probably shouldn't be needed, but
build-request.expand-no-defaults is being abused for unintended
purposes and it needs help
"""
from .property import Property
assert is_iterable_typed(properties, Property)
result = []
matched_subs = set()
all_subs = set()
for p in properties:
f = p.feature()
if not f.subfeature():
subs = __select_subproperties (p, properties)
if subs:
matched_subs.update(subs)
subvalues = '-'.join (sub.value() for sub in subs)
result.append(Property(
p.feature(), p.value() + '-' + subvalues,
p.condition()))
else:
result.append(p)
else:
all_subs.add(p)
    # TODO: these variables are used just for debugging. What's the overhead?
assert all_subs == matched_subs
return result
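# Illustrative sketch (not part of the original module), in Jam property
# notation with hypothetical values:
#
#   compress_subproperties([<toolset>gcc, <toolset-gcc:version>3.0.4])
#   # -> [<toolset>gcc-3.0.4]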
######################################################################################
# Private methods
def __select_subproperties (parent_property, properties):
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
assert isinstance(parent_property, Property)
return [ x for x in properties if __is_subproperty_of (parent_property, x) ]
def __get_subfeature_name (subfeature, value_string):
assert isinstance(subfeature, basestring)
assert isinstance(value_string, basestring) or value_string is None
if value_string == None:
prefix = ''
else:
prefix = value_string + ':'
return prefix + subfeature
def __validate_feature_attributes (name, attributes):
assert isinstance(name, basestring)
assert is_iterable_typed(attributes, basestring)
for attribute in attributes:
if not attribute in __all_attributes:
raise InvalidAttribute ("unknown attributes: '%s' in feature declaration: '%s'" % (str (b2.util.set.difference (attributes, __all_attributes)), name))
if name in __all_features:
raise AlreadyDefined ("feature '%s' already defined" % name)
elif 'implicit' in attributes and 'free' in attributes:
raise InvalidAttribute ("free features cannot also be implicit (in declaration of feature '%s')" % name)
elif 'free' in attributes and 'propagated' in attributes:
raise InvalidAttribute ("free features cannot also be propagated (in declaration of feature '%s')" % name)
def __validate_feature (feature):
""" Generates an error if the feature is unknown.
"""
assert isinstance(feature, basestring)
if not __all_features.has_key (feature):
raise BaseException ('unknown feature "%s"' % feature)
def __select_subfeatures (parent_property, features):
""" Given a property, return the subset of features consisting of all
ordinary subfeatures of the property's feature, and all specific
subfeatures of the property's feature which are conditional on the
property's value.
"""
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert is_iterable_typed(features, Feature)
return [f for f in features if is_subfeature_of (parent_property, f)]
# FIXME: copy over tests.
| mit |
rockfruit/bika.lims | bika/lims/browser/analysisrequest/results_not_requested.py | 1 | 2747 | # This file is part of Bika LIMS
#
# Copyright 2011-2016 by its authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from AccessControl import getSecurityManager
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.permissions import *
from bika.lims.browser.analysisrequest import AnalysisRequestManageResultsView
from bika.lims.content.analysisrequest import schema as AnalysisRequestSchema
from bika.lims.utils import to_utf8
from bika.lims.workflow import doActionFor
from plone.app.layout.globals.interfaces import IViewView
from DateTime import DateTime
from Products.Archetypes import PloneMessageFactory as PMF
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.interface import implements
import plone
class AnalysisRequestResultsNotRequestedView(AnalysisRequestManageResultsView):
implements(IViewView)
template = ViewPageTemplateFile("templates/analysisrequest_analyses_not_requested.pt")
def __call__(self):
ar = self.context
workflow = getToolByName(ar, 'portal_workflow')
        # If it is a retracted AR, show the link to the child AR and show a warning message
if workflow.getInfoFor(ar, 'review_state') == 'invalid':
childar = hasattr(ar, 'getChildAnalysisRequest') \
and ar.getChildAnalysisRequest() or None
childid = childar and childar.getRequestID() or None
message = _('This Analysis Request has been withdrawn and is shown '
                        'for traceability purposes only. Retest: ${retest_child_id}.',
mapping={"retest_child_id":childid if childid else ''})
self.context.plone_utils.addPortalMessage(message, 'warning')
        # If it is an AR automatically generated due to a Retraction, show its
# parent AR information
if hasattr(ar, 'getParentAnalysisRequest') \
and ar.getParentAnalysisRequest():
par = ar.getParentAnalysisRequest()
message = _(
'This Analysis Request has been generated automatically due to '
'the retraction of the Analysis Request ${retracted_request_id}.',
mapping={"retracted_request_id": par.getRequestID()})
self.context.plone_utils.addPortalMessage(message, 'info')
can_do = getSecurityManager().checkPermission(ResultsNotRequested, ar)
if workflow.getInfoFor(ar, 'cancellation_state') == "cancelled":
self.request.response.redirect(ar.absolute_url())
elif not(can_do):
self.request.response.redirect(ar.absolute_url())
else:
return self.template()
| agpl-3.0 |
qnib/QNIBCollect | src/diamond/collectors/traceroute/traceroute.py | 7 | 3878 | # coding=utf-8
"""
Collect icmp round trip times per hop
#### Dependencies
* libparistraceroute1 (as paris-traceroute)
"""
import re
import diamond.collector
from subprocess import Popen, PIPE
class TracerouteCollector(diamond.collector.ProcessCollector):
def get_default_config_help(self):
config_help = super(TracerouteCollector, self).get_default_config_help()
config_help.update({
'bin': "The path to the tracerouting library.",
'destport': "The target port number",
'hosts': "Hosts to run the traceroute command on",
'protocol': "The protocol to use for the traceroute pings (icmp, udp, tcp)",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(TracerouteCollector, self).get_default_config()
config.update({
'path': 'traceroute',
'hosts': { "yelp":"yelp.com" },
'protocol': 'icmp',
})
return config
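    # Illustrative sketch (not part of the collector): each entry of the
    # 'hosts' option maps a metric-friendly alias to the address that is
    # actually traced, e.g. the hypothetical entry
    #
    #   hosts = {'example': 'example.com'}
    #
    # produces metrics named 'example.RoundTripTime', one per hop, with
    # 'hop' and 'ip' dimensions attached in collect() below.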
def collect(self):
protocol_args = self._protocol_config()
if not protocol_args:
self.log.error(
"Please specify a protocol for the traceroute,\n"
+ " options (icmp, tcp, udp)"
)
return None
for pseudo_hostname, address in self.config.get('hosts', {}).iteritems():
metric_name = '.'.join([
pseudo_hostname,
'RoundTripTime',
])
if 'bin' not in self.config:
self.log.error(
"Please specify the path of the canonical binary"
)
return None
cmd = [self.config['bin'], '-nq1', '-w1', protocol_args, address]
try:
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
errors = process.stderr.readline()
if errors:
self.log.error(
"Error running traceroute process: {0!s}".format(errors)
)
continue
while True:
line = process.stdout.readline()
if not line:
break
# A hop contains:
# hop, ip, rtt
# in that order.
hop_data = line.split()
if not hop_data or len(hop_data) not in [2, 3]:
continue
hop_number = ip = None
rtt = 0
try:
[hop_number, ip, rtt_ms] = hop_data
rtt = re.match('([0-9\.]+)ms', rtt_ms).group(1)
except ValueError as e:
[hop_number, ip] = hop_data
if hop_number is None or ip is None:
continue
rtt = float(rtt)
self.dimensions = {
'hop': hop_number,
}
if '*' not in ip:
self.dimensions['ip'] = ip
self.publish(metric_name, rtt)
except Exception as e:
self.log.error(
"Error running TracerouteCollector: {0!s}".format(e)
)
continue
def _protocol_config(self):
protocol = self.config['protocol'].lower()
destport = self.config.get('destport', 80)
if protocol == 'udp':
protocol_args = '-U'
elif protocol == 'tcp':
protocol_args = '-Tp{0!s}'.format(destport)
elif protocol == 'icmp':
protocol_args = '-I'
else:
return None
return protocol_args
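    # Illustrative sketch (not part of the collector): the mapping implemented
    # above, assuming the default destport of 80:
    #
    #   'icmp' -> '-I'
    #   'udp'  -> '-U'
    #   'tcp'  -> '-Tp80'   (the configured destport is appended)
    #   other  -> None, which makes collect() log an error and return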
| apache-2.0 |
ArcherSys/ArcherSys | Lib/site-packages/pygments/lexers/modula2.py | 23 | 52564 | # -*- coding: utf-8 -*-
"""
pygments.lexers.modula2
~~~~~~~~~~~~~~~~~~~~~~~
Multi-Dialect Lexer for Modula-2.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, \
String, Number, Punctuation, Error
__all__ = ['Modula2Lexer']
# Multi-Dialect Modula-2 Lexer
class Modula2Lexer(RegexLexer):
"""
For `Modula-2 <http://www.modula2.org/>`_ source code.
The Modula-2 lexer supports several dialects. By default, it operates in
fallback mode, recognising the *combined* literals, punctuation symbols
and operators of all supported dialects, and the *combined* reserved words
and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not
differentiating between library defined identifiers.
To select a specific dialect, a dialect option may be passed
or a dialect tag may be embedded into a source file.
Dialect Options:
`m2pim`
Select PIM Modula-2 dialect.
`m2iso`
Select ISO Modula-2 dialect.
`m2r10`
Select Modula-2 R10 dialect.
`objm2`
Select Objective Modula-2 dialect.
The PIM and ISO dialect options may be qualified with a language extension.
Language Extensions:
`+aglet`
Select Aglet Modula-2 extensions, available with m2iso.
`+gm2`
Select GNU Modula-2 extensions, available with m2pim.
`+p1`
Select p1 Modula-2 extensions, available with m2iso.
`+xds`
Select XDS Modula-2 extensions, available with m2iso.
Passing a Dialect Option via Unix Commandline Interface
Dialect options may be passed to the lexer using the `dialect` key.
Only one such option should be passed. If multiple dialect options are
passed, the first valid option is used, any subsequent options are ignored.
Examples:
`$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input`
Use ISO dialect to render input to HTML output
`$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input`
Use ISO dialect with p1 extensions to render input to RTF output
Embedding a Dialect Option within a source file
A dialect option may be embedded in a source file in form of a dialect
tag, a specially formatted comment that specifies a dialect option.
Dialect Tag EBNF::
dialectTag :
OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ;
dialectOption :
'm2pim' | 'm2iso' | 'm2r10' | 'objm2' |
'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ;
Prefix : '!' ;
OpeningCommentDelim : '(*' ;
ClosingCommentDelim : '*)' ;
No whitespace is permitted between the tokens of a dialect tag.
In the event that a source file contains multiple dialect tags, the first
tag that contains a valid dialect option will be used and any subsequent
dialect tags will be ignored. Ideally, a dialect tag should be placed
at the beginning of a source file.
An embedded dialect tag overrides a dialect option set via command line.
Examples:
``(*!m2r10*) DEFINITION MODULE Foobar; ...``
Use Modula2 R10 dialect to render this source file.
``(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...``
Use PIM dialect with GNU extensions to render this source file.
Algol Publication Mode:
In Algol publication mode, source text is rendered for publication of
algorithms in scientific papers and academic texts, following the format
of the Revised Algol-60 Language Report. It is activated by passing
one of two corresponding styles as an option:
`algol`
render reserved words lowercase underline boldface
and builtins lowercase boldface italic
`algol_nu`
render reserved words lowercase boldface (no underlining)
and builtins lowercase boldface italic
The lexer automatically performs the required lowercase conversion when
this mode is activated.
Example:
``$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input``
Render input file in Algol publication mode to LaTeX output.
Rendering Mode of First Class ADT Identifiers:
The rendering of standard library first class ADT identifiers is controlled
by option flag "treat_stdlib_adts_as_builtins".
When this option is turned on, standard library ADT identifiers are rendered
as builtins. When it is turned off, they are rendered as ordinary library
identifiers.
`treat_stdlib_adts_as_builtins` (default: On)
The option is useful for dialects that support ADTs as first class objects
and provide ADTs in the standard library that would otherwise be built-in.
At present, only Modula-2 R10 supports library ADTs as first class objects
and therefore, no ADT identifiers are defined for any other dialects.
Example:
``$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...``
Render standard library ADTs as ordinary library types.
.. versionadded:: 1.3
.. versionchanged:: 2.1
Added multi-dialect support.
"""
name = 'Modula-2'
aliases = ['modula2', 'm2']
filenames = ['*.def', '*.mod']
mimetypes = ['text/x-modula2']
flags = re.MULTILINE | re.DOTALL
tokens = {
'whitespace': [
(r'\n+', Text), # blank lines
(r'\s+', Text), # whitespace
],
'dialecttags': [
# PIM Dialect Tag
(r'\(\*!m2pim\*\)', Comment.Special),
# ISO Dialect Tag
(r'\(\*!m2iso\*\)', Comment.Special),
# M2R10 Dialect Tag
(r'\(\*!m2r10\*\)', Comment.Special),
# ObjM2 Dialect Tag
(r'\(\*!objm2\*\)', Comment.Special),
# Aglet Extensions Dialect Tag
(r'\(\*!m2iso\+aglet\*\)', Comment.Special),
# GNU Extensions Dialect Tag
(r'\(\*!m2pim\+gm2\*\)', Comment.Special),
# p1 Extensions Dialect Tag
(r'\(\*!m2iso\+p1\*\)', Comment.Special),
# XDS Extensions Dialect Tag
(r'\(\*!m2iso\+xds\*\)', Comment.Special),
],
'identifiers': [
(r'([a-zA-Z_$][\w$]*)', Name),
],
'prefixed_number_literals': [
#
# Base-2, whole number
(r'0b[01]+(\'[01]+)*', Number.Bin),
#
# Base-16, whole number
(r'0[ux][0-9A-F]+(\'[0-9A-F]+)*', Number.Hex),
],
'plain_number_literals': [
#
# Base-10, real number with exponent
(r'[0-9]+(\'[0-9]+)*' # integral part
r'\.[0-9]+(\'[0-9]+)*' # fractional part
r'[eE][+-]?[0-9]+(\'[0-9]+)*', # exponent
Number.Float),
#
# Base-10, real number without exponent
(r'[0-9]+(\'[0-9]+)*' # integral part
r'\.[0-9]+(\'[0-9]+)*', # fractional part
Number.Float),
#
# Base-10, whole number
(r'[0-9]+(\'[0-9]+)*', Number.Integer),
],
'suffixed_number_literals': [
#
# Base-8, whole number
(r'[0-7]+B', Number.Oct),
#
# Base-8, character code
(r'[0-7]+C', Number.Oct),
#
# Base-16, number
(r'[0-9A-F]+H', Number.Hex),
],
'string_literals': [
(r"'(\\\\|\\'|[^'])*'", String), # single quoted string
(r'"(\\\\|\\"|[^"])*"', String), # double quoted string
],
'digraph_operators': [
# Dot Product Operator
(r'\*\.', Operator),
# Array Concatenation Operator
(r'\+>', Operator), # M2R10 + ObjM2
# Inequality Operator
(r'<>', Operator), # ISO + PIM
# Less-Or-Equal, Subset
(r'<=', Operator),
# Greater-Or-Equal, Superset
(r'>=', Operator),
# Identity Operator
(r'==', Operator), # M2R10 + ObjM2
# Type Conversion Operator
(r'::', Operator), # M2R10 + ObjM2
# Assignment Symbol
(r':=', Operator),
# Postfix Increment Mutator
(r'\+\+', Operator), # M2R10 + ObjM2
# Postfix Decrement Mutator
(r'--', Operator), # M2R10 + ObjM2
],
'unigraph_operators': [
# Arithmetic Operators
(r'[+-]', Operator),
(r'[*/]', Operator),
# ISO 80000-2 compliant Set Difference Operator
(r'\\', Operator), # M2R10 + ObjM2
# Relational Operators
(r'[=#<>]', Operator),
# Dereferencing Operator
(r'\^', Operator),
# Dereferencing Operator Synonym
(r'@', Operator), # ISO
# Logical AND Operator Synonym
(r'&', Operator), # PIM + ISO
# Logical NOT Operator Synonym
(r'~', Operator), # PIM + ISO
# Smalltalk Message Prefix
(r'`', Operator), # ObjM2
],
'digraph_punctuation': [
# Range Constructor
(r'\.\.', Punctuation),
# Opening Chevron Bracket
(r'<<', Punctuation), # M2R10 + ISO
# Closing Chevron Bracket
(r'>>', Punctuation), # M2R10 + ISO
# Blueprint Punctuation
(r'->', Punctuation), # M2R10 + ISO
# Distinguish |# and # in M2 R10
(r'\|#', Punctuation),
# Distinguish ## and # in M2 R10
(r'##', Punctuation),
# Distinguish |* and * in M2 R10
(r'\|\*', Punctuation),
],
'unigraph_punctuation': [
# Common Punctuation
(r'[\(\)\[\]{},.:;\|]', Punctuation),
# Case Label Separator Synonym
(r'!', Punctuation), # ISO
# Blueprint Punctuation
(r'\?', Punctuation), # M2R10 + ObjM2
],
'comments': [
# Single Line Comment
(r'^//.*?\n', Comment.Single), # M2R10 + ObjM2
# Block Comment
(r'\(\*([^$].*?)\*\)', Comment.Multiline),
# Template Block Comment
(r'/\*(.*?)\*/', Comment.Multiline), # M2R10 + ObjM2
],
'pragmas': [
# ISO Style Pragmas
(r'<\*.*?\*>', Comment.Preproc), # ISO, M2R10 + ObjM2
# Pascal Style Pragmas
(r'\(\*\$.*?\*\)', Comment.Preproc), # PIM
],
'root': [
include('whitespace'),
include('dialecttags'),
include('pragmas'),
include('comments'),
include('identifiers'),
include('suffixed_number_literals'), # PIM + ISO
include('prefixed_number_literals'), # M2R10 + ObjM2
include('plain_number_literals'),
include('string_literals'),
include('digraph_punctuation'),
include('digraph_operators'),
include('unigraph_punctuation'),
include('unigraph_operators'),
]
}
# C o m m o n D a t a s e t s
# Common Reserved Words Dataset
common_reserved_words = (
# 37 common reserved words
'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'FOR', 'FROM', 'IF',
'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', 'MODULE', 'NOT',
'OF', 'OR', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN',
'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
)
# Common Builtins Dataset
common_builtins = (
# 16 common builtins
'ABS', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'FALSE', 'INTEGER',
'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NIL', 'ODD', 'ORD', 'REAL',
'TRUE',
)
# Common Pseudo-Module Builtins Dataset
common_pseudo_builtins = (
# 4 common pseudo builtins
'ADDRESS', 'BYTE', 'WORD', 'ADR'
)
# P I M M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for PIM Modula-2
pim_lexemes_to_reject = (
'!', '`', '@', '$', '%', '?', '\\', '==', '++', '--', '::', '*.',
'+>', '->', '<<', '>>', '|#', '##',
)
# PIM Modula-2 Additional Reserved Words Dataset
pim_additional_reserved_words = (
# 3 additional reserved words
'EXPORT', 'QUALIFIED', 'WITH',
)
# PIM Modula-2 Additional Builtins Dataset
pim_additional_builtins = (
# 16 additional builtins
'BITSET', 'CAP', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH',
'INC', 'INCL', 'NEW', 'NIL', 'PROC', 'SIZE', 'TRUNC', 'VAL',
)
# PIM Modula-2 Additional Pseudo-Module Builtins Dataset
pim_additional_pseudo_builtins = (
# 5 additional pseudo builtins
'SYSTEM', 'PROCESS', 'TSIZE', 'NEWPROCESS', 'TRANSFER',
)
# I S O M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for ISO Modula-2
iso_lexemes_to_reject = (
'`', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->',
'<<', '>>', '|#', '##',
)
# ISO Modula-2 Additional Reserved Words Dataset
iso_additional_reserved_words = (
# 9 additional reserved words (ISO 10514-1)
'EXCEPT', 'EXPORT', 'FINALLY', 'FORWARD', 'PACKEDSET', 'QUALIFIED',
'REM', 'RETRY', 'WITH',
# 10 additional reserved words (ISO 10514-2 & ISO 10514-3)
'ABSTRACT', 'AS', 'CLASS', 'GUARD', 'INHERIT', 'OVERRIDE', 'READONLY',
'REVEAL', 'TRACED', 'UNSAFEGUARDED',
)
# ISO Modula-2 Additional Builtins Dataset
iso_additional_builtins = (
# 26 additional builtins (ISO 10514-1)
'BITSET', 'CAP', 'CMPLX', 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT',
'HALT', 'HIGH', 'IM', 'INC', 'INCL', 'INT', 'INTERRUPTIBLE', 'LENGTH',
'LFLOAT', 'LONGCOMPLEX', 'NEW', 'PROC', 'PROTECTION', 'RE', 'SIZE',
'TRUNC', 'UNINTERRUBTIBLE', 'VAL',
# 5 additional builtins (ISO 10514-2 & ISO 10514-3)
'CREATE', 'DESTROY', 'EMPTY', 'ISMEMBER', 'SELF',
)
# ISO Modula-2 Additional Pseudo-Module Builtins Dataset
iso_additional_pseudo_builtins = (
# 14 additional builtins (SYSTEM)
'SYSTEM', 'BITSPERLOC', 'LOCSPERBYTE', 'LOCSPERWORD', 'LOC',
'ADDADR', 'SUBADR', 'DIFADR', 'MAKEADR', 'ADR',
'ROTATE', 'SHIFT', 'CAST', 'TSIZE',
# 13 additional builtins (COROUTINES)
'COROUTINES', 'ATTACH', 'COROUTINE', 'CURRENT', 'DETACH', 'HANDLER',
'INTERRUPTSOURCE', 'IOTRANSFER', 'IsATTACHED', 'LISTEN',
'NEWCOROUTINE', 'PROT', 'TRANSFER',
# 9 additional builtins (EXCEPTIONS)
'EXCEPTIONS', 'AllocateSource', 'CurrentNumber', 'ExceptionNumber',
'ExceptionSource', 'GetMessage', 'IsCurrentSource',
'IsExceptionalExecution', 'RAISE',
# 3 additional builtins (TERMINATION)
'TERMINATION', 'IsTerminating', 'HasHalted',
# 4 additional builtins (M2EXCEPTION)
'M2EXCEPTION', 'M2Exceptions', 'M2Exception', 'IsM2Exception',
'indexException', 'rangeException', 'caseSelectException',
'invalidLocation', 'functionException', 'wholeValueException',
'wholeDivException', 'realValueException', 'realDivException',
'complexValueException', 'complexDivException', 'protException',
'sysException', 'coException', 'exException',
)
# M o d u l a - 2 R 1 0 D a t a s e t s
# Lexemes to Mark as Error Tokens for Modula-2 R10
m2r10_lexemes_to_reject = (
'!', '`', '@', '$', '%', '&', '<>',
)
# Modula-2 R10 reserved words in addition to the common set
m2r10_additional_reserved_words = (
# 12 additional reserved words
'ALIAS', 'ARGLIST', 'BLUEPRINT', 'COPY', 'GENLIB', 'INDETERMINATE',
'NEW', 'NONE', 'OPAQUE', 'REFERENTIAL', 'RELEASE', 'RETAIN',
# 2 additional reserved words with symbolic assembly option
'ASM', 'REG',
)
# Modula-2 R10 builtins in addition to the common set
m2r10_additional_builtins = (
# 26 additional builtins
'CARDINAL', 'COUNT', 'EMPTY', 'EXISTS', 'INSERT', 'LENGTH', 'LONGCARD',
'OCTET', 'PTR', 'PRED', 'READ', 'READNEW', 'REMOVE', 'RETRIEVE', 'SORT',
'STORE', 'SUBSET', 'SUCC', 'TLIMIT', 'TMAX', 'TMIN', 'TRUE', 'TSIZE',
'UNICHAR', 'WRITE', 'WRITEF',
)
# Modula-2 R10 Additional Pseudo-Module Builtins Dataset
m2r10_additional_pseudo_builtins = (
# 13 additional builtins (TPROPERTIES)
'TPROPERTIES', 'PROPERTY', 'LITERAL', 'TPROPERTY', 'TLITERAL',
'TBUILTIN', 'TDYN', 'TREFC', 'TNIL', 'TBASE', 'TPRECISION',
'TMAXEXP', 'TMINEXP',
# 4 additional builtins (CONVERSION)
'CONVERSION', 'TSXFSIZE', 'SXF', 'VAL',
# 35 additional builtins (UNSAFE)
'UNSAFE', 'CAST', 'INTRINSIC', 'AVAIL', 'ADD', 'SUB', 'ADDC', 'SUBC',
'FETCHADD', 'FETCHSUB', 'SHL', 'SHR', 'ASHR', 'ROTL', 'ROTR', 'ROTLC',
'ROTRC', 'BWNOT', 'BWAND', 'BWOR', 'BWXOR', 'BWNAND', 'BWNOR',
'SETBIT', 'TESTBIT', 'LSBIT', 'MSBIT', 'CSBITS', 'BAIL', 'HALT',
'TODO', 'FFI', 'ADDR', 'VARGLIST', 'VARGC',
# 11 additional builtins (ATOMIC)
'ATOMIC', 'INTRINSIC', 'AVAIL', 'SWAP', 'CAS', 'INC', 'DEC', 'BWAND',
'BWNAND', 'BWOR', 'BWXOR',
# 7 additional builtins (COMPILER)
'COMPILER', 'DEBUG', 'MODNAME', 'PROCNAME', 'LINENUM', 'DEFAULT',
'HASH',
# 5 additional builtins (ASSEMBLER)
'ASSEMBLER', 'REGISTER', 'SETREG', 'GETREG', 'CODE',
)
# O b j e c t i v e M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for Objective Modula-2
objm2_lexemes_to_reject = (
'!', '$', '%', '&', '<>',
)
# Objective Modula-2 Extensions
# reserved words in addition to Modula-2 R10
objm2_additional_reserved_words = (
# 16 additional reserved words
'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
'SUPER', 'TRY',
)
# Objective Modula-2 Extensions
# builtins in addition to Modula-2 R10
objm2_additional_builtins = (
# 3 additional builtins
'OBJECT', 'NO', 'YES',
)
# Objective Modula-2 Extensions
# pseudo-module builtins in addition to Modula-2 R10
objm2_additional_pseudo_builtins = (
# None
)
# A g l e t M o d u l a - 2 D a t a s e t s
# Aglet Extensions
# reserved words in addition to ISO Modula-2
aglet_additional_reserved_words = (
# None
)
# Aglet Extensions
# builtins in addition to ISO Modula-2
aglet_additional_builtins = (
# 9 additional builtins
'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
'CARDINAL32', 'INTEGER8', 'INTEGER16', 'INTEGER32',
)
# Aglet Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
aglet_additional_pseudo_builtins = (
# None
)
# G N U M o d u l a - 2 D a t a s e t s
# GNU Extensions
# reserved words in addition to PIM Modula-2
gm2_additional_reserved_words = (
# 10 additional reserved words
'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
'__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
)
# GNU Extensions
# builtins in addition to PIM Modula-2
gm2_additional_builtins = (
# 21 additional builtins
'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
)
# GNU Extensions
# pseudo-module builtins in addition to PIM Modula-2
gm2_additional_pseudo_builtins = (
# None
)
# p 1 M o d u l a - 2 D a t a s e t s
# p1 Extensions
# reserved words in addition to ISO Modula-2
p1_additional_reserved_words = (
# None
)
# p1 Extensions
# builtins in addition to ISO Modula-2
p1_additional_builtins = (
# None
)
# p1 Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
p1_additional_pseudo_builtins = (
# 1 additional builtin
'BCD',
)
# X D S M o d u l a - 2 D a t a s e t s
# XDS Extensions
# reserved words in addition to ISO Modula-2
xds_additional_reserved_words = (
# 1 additional reserved word
'SEQ',
)
# XDS Extensions
# builtins in addition to ISO Modula-2
xds_additional_builtins = (
# 9 additional builtins
'ASH', 'ASSERT', 'DIFFADR_TYPE', 'ENTIER', 'INDEX', 'LEN',
'LONGCARD', 'SHORTCARD', 'SHORTINT',
)
# XDS Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
xds_additional_pseudo_builtins = (
# 22 additional builtins (SYSTEM)
'PROCESS', 'NEWPROCESS', 'BOOL8', 'BOOL16', 'BOOL32', 'CARD8',
'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'REF', 'MOVE',
        'FILL', 'GET', 'PUT', 'CC', 'int', 'unsigned', 'size_t', 'void',
        # 3 additional builtins (COMPILER)
        'COMPILER', 'OPTION', 'EQUATION',
)
# P I M S t a n d a r d L i b r a r y D a t a s e t s
# PIM Modula-2 Standard Library Modules Dataset
pim_stdlib_module_identifiers = (
'Terminal', 'FileSystem', 'InOut', 'RealInOut', 'MathLib0', 'Storage',
)
# PIM Modula-2 Standard Library Types Dataset
pim_stdlib_type_identifiers = (
'Flag', 'FlagSet', 'Response', 'Command', 'Lock', 'Permission',
'MediumType', 'File', 'FileProc', 'DirectoryProc', 'FileCommand',
'DirectoryCommand',
)
# PIM Modula-2 Standard Library Procedures Dataset
pim_stdlib_proc_identifiers = (
'Read', 'BusyRead', 'ReadAgain', 'Write', 'WriteString', 'WriteLn',
'Create', 'Lookup', 'Close', 'Delete', 'Rename', 'SetRead', 'SetWrite',
'SetModify', 'SetOpen', 'Doio', 'SetPos', 'GetPos', 'Length', 'Reset',
'Again', 'ReadWord', 'WriteWord', 'ReadChar', 'WriteChar',
'CreateMedium', 'DeleteMedium', 'AssignName', 'DeassignName',
'ReadMedium', 'LookupMedium', 'OpenInput', 'OpenOutput', 'CloseInput',
'CloseOutput', 'ReadString', 'ReadInt', 'ReadCard', 'ReadWrd',
'WriteInt', 'WriteCard', 'WriteOct', 'WriteHex', 'WriteWrd',
'ReadReal', 'WriteReal', 'WriteFixPt', 'WriteRealOct', 'sqrt', 'exp',
'ln', 'sin', 'cos', 'arctan', 'entier', 'ALLOCATE', 'DEALLOCATE',
)
# PIM Modula-2 Standard Library Variables Dataset
pim_stdlib_var_identifiers = (
'Done', 'termCH', 'in', 'out'
)
# PIM Modula-2 Standard Library Constants Dataset
pim_stdlib_const_identifiers = (
'EOL',
)
# I S O S t a n d a r d L i b r a r y D a t a s e t s
# ISO Modula-2 Standard Library Modules Dataset
iso_stdlib_module_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Types Dataset
iso_stdlib_type_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Procedures Dataset
iso_stdlib_proc_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Variables Dataset
iso_stdlib_var_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Constants Dataset
iso_stdlib_const_identifiers = (
# TO DO
)
# M 2 R 1 0 S t a n d a r d L i b r a r y D a t a s e t s
# Modula-2 R10 Standard Library ADTs Dataset
m2r10_stdlib_adt_identifiers = (
'BCD', 'LONGBCD', 'BITSET', 'SHORTBITSET', 'LONGBITSET',
'LONGLONGBITSET', 'COMPLEX', 'LONGCOMPLEX', 'SHORTCARD', 'LONGLONGCARD',
'SHORTINT', 'LONGLONGINT', 'POSINT', 'SHORTPOSINT', 'LONGPOSINT',
'LONGLONGPOSINT', 'BITSET8', 'BITSET16', 'BITSET32', 'BITSET64',
'BITSET128', 'BS8', 'BS16', 'BS32', 'BS64', 'BS128', 'CARDINAL8',
'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'CARDINAL128', 'CARD8',
'CARD16', 'CARD32', 'CARD64', 'CARD128', 'INTEGER8', 'INTEGER16',
'INTEGER32', 'INTEGER64', 'INTEGER128', 'INT8', 'INT16', 'INT32',
'INT64', 'INT128', 'STRING', 'UNISTRING',
)
# Modula-2 R10 Standard Library Blueprints Dataset
m2r10_stdlib_blueprint_identifiers = (
'ProtoRoot', 'ProtoComputational', 'ProtoNumeric', 'ProtoScalar',
'ProtoNonScalar', 'ProtoCardinal', 'ProtoInteger', 'ProtoReal',
'ProtoComplex', 'ProtoVector', 'ProtoTuple', 'ProtoCompArray',
'ProtoCollection', 'ProtoStaticArray', 'ProtoStaticSet',
'ProtoStaticString', 'ProtoArray', 'ProtoString', 'ProtoSet',
'ProtoMultiSet', 'ProtoDictionary', 'ProtoMultiDict', 'ProtoExtension',
'ProtoIO', 'ProtoCardMath', 'ProtoIntMath', 'ProtoRealMath',
)
# Modula-2 R10 Standard Library Modules Dataset
m2r10_stdlib_module_identifiers = (
'ASCII', 'BooleanIO', 'CharIO', 'UnicharIO', 'OctetIO',
'CardinalIO', 'LongCardIO', 'IntegerIO', 'LongIntIO', 'RealIO',
'LongRealIO', 'BCDIO', 'LongBCDIO', 'CardMath', 'LongCardMath',
'IntMath', 'LongIntMath', 'RealMath', 'LongRealMath', 'BCDMath',
'LongBCDMath', 'FileIO', 'FileSystem', 'Storage', 'IOSupport',
)
# Modula-2 R10 Standard Library Types Dataset
m2r10_stdlib_type_identifiers = (
'File', 'Status',
# TO BE COMPLETED
)
# Modula-2 R10 Standard Library Procedures Dataset
m2r10_stdlib_proc_identifiers = (
'ALLOCATE', 'DEALLOCATE', 'SIZE',
# TO BE COMPLETED
)
# Modula-2 R10 Standard Library Variables Dataset
m2r10_stdlib_var_identifiers = (
'stdIn', 'stdOut', 'stdErr',
)
# Modula-2 R10 Standard Library Constants Dataset
m2r10_stdlib_const_identifiers = (
'pi', 'tau',
)
# D i a l e c t s
# Dialect modes
dialects = (
'unknown',
'm2pim', 'm2iso', 'm2r10', 'objm2',
'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1', 'm2iso+xds',
)
# D a t a b a s e s
# Lexemes to Mark as Errors Database
lexemes_to_reject_db = {
# Lexemes to reject for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Lexemes to reject for PIM Modula-2
'm2pim': (
pim_lexemes_to_reject,
),
# Lexemes to reject for ISO Modula-2
'm2iso': (
iso_lexemes_to_reject,
),
# Lexemes to reject for Modula-2 R10
'm2r10': (
m2r10_lexemes_to_reject,
),
# Lexemes to reject for Objective Modula-2
'objm2': (
objm2_lexemes_to_reject,
),
# Lexemes to reject for Aglet Modula-2
'm2iso+aglet': (
iso_lexemes_to_reject,
),
# Lexemes to reject for GNU Modula-2
'm2pim+gm2': (
pim_lexemes_to_reject,
),
# Lexemes to reject for p1 Modula-2
'm2iso+p1': (
iso_lexemes_to_reject,
),
# Lexemes to reject for XDS Modula-2
'm2iso+xds': (
iso_lexemes_to_reject,
),
}
# Reserved Words Database
reserved_words_db = {
# Reserved words for unknown dialect
'unknown': (
common_reserved_words,
pim_additional_reserved_words,
iso_additional_reserved_words,
m2r10_additional_reserved_words,
),
# Reserved words for PIM Modula-2
'm2pim': (
common_reserved_words,
pim_additional_reserved_words,
),
# Reserved words for Modula-2 R10
'm2iso': (
common_reserved_words,
iso_additional_reserved_words,
),
# Reserved words for ISO Modula-2
'm2r10': (
common_reserved_words,
m2r10_additional_reserved_words,
),
# Reserved words for Objective Modula-2
'objm2': (
common_reserved_words,
m2r10_additional_reserved_words,
objm2_additional_reserved_words,
),
# Reserved words for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_reserved_words,
iso_additional_reserved_words,
aglet_additional_reserved_words,
),
# Reserved words for GNU Modula-2 Extensions
'm2pim+gm2': (
common_reserved_words,
pim_additional_reserved_words,
gm2_additional_reserved_words,
),
# Reserved words for p1 Modula-2 Extensions
'm2iso+p1': (
common_reserved_words,
iso_additional_reserved_words,
p1_additional_reserved_words,
),
# Reserved words for XDS Modula-2 Extensions
'm2iso+xds': (
common_reserved_words,
iso_additional_reserved_words,
xds_additional_reserved_words,
),
}
# Builtins Database
builtins_db = {
# Builtins for unknown dialect
'unknown': (
common_builtins,
pim_additional_builtins,
iso_additional_builtins,
m2r10_additional_builtins,
),
# Builtins for PIM Modula-2
'm2pim': (
common_builtins,
pim_additional_builtins,
),
# Builtins for ISO Modula-2
'm2iso': (
common_builtins,
iso_additional_builtins,
),
# Builtins for ISO Modula-2
'm2r10': (
common_builtins,
m2r10_additional_builtins,
),
# Builtins for Objective Modula-2
'objm2': (
common_builtins,
m2r10_additional_builtins,
objm2_additional_builtins,
),
# Builtins for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_builtins,
iso_additional_builtins,
aglet_additional_builtins,
),
# Builtins for GNU Modula-2 Extensions
'm2pim+gm2': (
common_builtins,
pim_additional_builtins,
gm2_additional_builtins,
),
# Builtins for p1 Modula-2 Extensions
'm2iso+p1': (
common_builtins,
iso_additional_builtins,
p1_additional_builtins,
),
# Builtins for XDS Modula-2 Extensions
'm2iso+xds': (
common_builtins,
iso_additional_builtins,
xds_additional_builtins,
),
}
# Pseudo-Module Builtins Database
pseudo_builtins_db = {
# Builtins for unknown dialect
'unknown': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
iso_additional_pseudo_builtins,
m2r10_additional_pseudo_builtins,
),
# Builtins for PIM Modula-2
'm2pim': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
),
# Builtins for ISO Modula-2
'm2iso': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
),
# Builtins for ISO Modula-2
'm2r10': (
common_pseudo_builtins,
m2r10_additional_pseudo_builtins,
),
# Builtins for Objective Modula-2
'objm2': (
common_pseudo_builtins,
m2r10_additional_pseudo_builtins,
objm2_additional_pseudo_builtins,
),
# Builtins for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
aglet_additional_pseudo_builtins,
),
# Builtins for GNU Modula-2 Extensions
'm2pim+gm2': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
gm2_additional_pseudo_builtins,
),
# Builtins for p1 Modula-2 Extensions
'm2iso+p1': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
p1_additional_pseudo_builtins,
),
# Builtins for XDS Modula-2 Extensions
'm2iso+xds': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
xds_additional_pseudo_builtins,
),
}
# Standard Library ADTs Database
stdlib_adts_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library ADTs for PIM Modula-2
'm2pim': (
# No first class library types
),
# Standard Library ADTs for ISO Modula-2
'm2iso': (
# No first class library types
),
# Standard Library ADTs for Modula-2 R10
'm2r10': (
m2r10_stdlib_adt_identifiers,
),
# Standard Library ADTs for Objective Modula-2
'objm2': (
m2r10_stdlib_adt_identifiers,
),
# Standard Library ADTs for Aglet Modula-2
'm2iso+aglet': (
# No first class library types
),
# Standard Library ADTs for GNU Modula-2
'm2pim+gm2': (
# No first class library types
),
# Standard Library ADTs for p1 Modula-2
'm2iso+p1': (
# No first class library types
),
# Standard Library ADTs for XDS Modula-2
'm2iso+xds': (
# No first class library types
),
}
# Standard Library Modules Database
stdlib_modules_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Modules for PIM Modula-2
'm2pim': (
pim_stdlib_module_identifiers,
),
# Standard Library Modules for ISO Modula-2
'm2iso': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for Modula-2 R10
'm2r10': (
m2r10_stdlib_blueprint_identifiers,
m2r10_stdlib_module_identifiers,
m2r10_stdlib_adt_identifiers,
),
# Standard Library Modules for Objective Modula-2
'objm2': (
m2r10_stdlib_blueprint_identifiers,
m2r10_stdlib_module_identifiers,
),
# Standard Library Modules for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_module_identifiers,
),
# Standard Library Modules for p1 Modula-2
'm2iso+p1': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for XDS Modula-2
'm2iso+xds': (
iso_stdlib_module_identifiers,
),
}
# Standard Library Types Database
stdlib_types_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Types for PIM Modula-2
'm2pim': (
pim_stdlib_type_identifiers,
),
# Standard Library Types for ISO Modula-2
'm2iso': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for Modula-2 R10
'm2r10': (
m2r10_stdlib_type_identifiers,
),
# Standard Library Types for Objective Modula-2
'objm2': (
m2r10_stdlib_type_identifiers,
),
# Standard Library Types for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_type_identifiers,
),
# Standard Library Types for p1 Modula-2
'm2iso+p1': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for XDS Modula-2
'm2iso+xds': (
iso_stdlib_type_identifiers,
),
}
# Standard Library Procedures Database
stdlib_procedures_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Procedures for PIM Modula-2
'm2pim': (
pim_stdlib_proc_identifiers,
),
# Standard Library Procedures for ISO Modula-2
'm2iso': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for Modula-2 R10
'm2r10': (
m2r10_stdlib_proc_identifiers,
),
# Standard Library Procedures for Objective Modula-2
'objm2': (
m2r10_stdlib_proc_identifiers,
),
# Standard Library Procedures for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_proc_identifiers,
),
# Standard Library Procedures for p1 Modula-2
'm2iso+p1': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for XDS Modula-2
'm2iso+xds': (
iso_stdlib_proc_identifiers,
),
}
# Standard Library Variables Database
stdlib_variables_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Variables for PIM Modula-2
'm2pim': (
pim_stdlib_var_identifiers,
),
# Standard Library Variables for ISO Modula-2
'm2iso': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for Modula-2 R10
'm2r10': (
m2r10_stdlib_var_identifiers,
),
# Standard Library Variables for Objective Modula-2
'objm2': (
m2r10_stdlib_var_identifiers,
),
# Standard Library Variables for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_var_identifiers,
),
# Standard Library Variables for p1 Modula-2
'm2iso+p1': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for XDS Modula-2
'm2iso+xds': (
iso_stdlib_var_identifiers,
),
}
# Standard Library Constants Database
stdlib_constants_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Constants for PIM Modula-2
'm2pim': (
pim_stdlib_const_identifiers,
),
# Standard Library Constants for ISO Modula-2
'm2iso': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for Modula-2 R10
'm2r10': (
m2r10_stdlib_const_identifiers,
),
# Standard Library Constants for Objective Modula-2
'objm2': (
m2r10_stdlib_const_identifiers,
),
# Standard Library Constants for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_const_identifiers,
),
# Standard Library Constants for p1 Modula-2
'm2iso+p1': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for XDS Modula-2
'm2iso+xds': (
iso_stdlib_const_identifiers,
),
}
# M e t h o d s
# initialise a lexer instance
def __init__(self, **options):
#
# check dialect options
#
dialects = get_list_opt(options, 'dialect', [])
#
for dialect_option in dialects:
            if dialect_option in self.dialects[1:]:
# valid dialect option found
self.set_dialect(dialect_option)
break
#
# Fallback Mode (DEFAULT)
else:
# no valid dialect option
self.set_dialect('unknown')
#
self.dialect_set_by_tag = False
#
# check style options
#
styles = get_list_opt(options, 'style', [])
#
# use lowercase mode for Algol style
if 'algol' in styles or 'algol_nu' in styles:
self.algol_publication_mode = True
else:
self.algol_publication_mode = False
#
# Check option flags
#
self.treat_stdlib_adts_as_builtins = get_bool_opt(
options, 'treat_stdlib_adts_as_builtins', True)
#
# call superclass initialiser
RegexLexer.__init__(self, **options)
# Set lexer to a specified dialect
def set_dialect(self, dialect_id):
#
# if __debug__:
# print 'entered set_dialect with arg: ', dialect_id
#
# check dialect name against known dialects
if dialect_id not in self.dialects:
dialect = 'unknown' # default
else:
dialect = dialect_id
#
# compose lexemes to reject set
lexemes_to_reject_set = set()
# add each list of reject lexemes for this dialect
for list in self.lexemes_to_reject_db[dialect]:
lexemes_to_reject_set.update(set(list))
#
# compose reserved words set
reswords_set = set()
# add each list of reserved words for this dialect
for list in self.reserved_words_db[dialect]:
reswords_set.update(set(list))
#
# compose builtins set
builtins_set = set()
# add each list of builtins for this dialect excluding reserved words
for list in self.builtins_db[dialect]:
builtins_set.update(set(list).difference(reswords_set))
#
# compose pseudo-builtins set
pseudo_builtins_set = set()
# add each list of builtins for this dialect excluding reserved words
for list in self.pseudo_builtins_db[dialect]:
pseudo_builtins_set.update(set(list).difference(reswords_set))
#
# compose ADTs set
adts_set = set()
# add each list of ADTs for this dialect excluding reserved words
for list in self.stdlib_adts_db[dialect]:
adts_set.update(set(list).difference(reswords_set))
#
# compose modules set
modules_set = set()
        # add each list of modules for this dialect excluding builtins
for list in self.stdlib_modules_db[dialect]:
modules_set.update(set(list).difference(builtins_set))
#
# compose types set
types_set = set()
# add each list of types for this dialect excluding builtins
for list in self.stdlib_types_db[dialect]:
types_set.update(set(list).difference(builtins_set))
#
# compose procedures set
procedures_set = set()
# add each list of procedures for this dialect excluding builtins
for list in self.stdlib_procedures_db[dialect]:
procedures_set.update(set(list).difference(builtins_set))
#
# compose variables set
variables_set = set()
# add each list of variables for this dialect excluding builtins
for list in self.stdlib_variables_db[dialect]:
variables_set.update(set(list).difference(builtins_set))
#
# compose constants set
constants_set = set()
# add each list of constants for this dialect excluding builtins
for list in self.stdlib_constants_db[dialect]:
constants_set.update(set(list).difference(builtins_set))
#
# update lexer state
self.dialect = dialect
self.lexemes_to_reject = lexemes_to_reject_set
self.reserved_words = reswords_set
self.builtins = builtins_set
self.pseudo_builtins = pseudo_builtins_set
self.adts = adts_set
self.modules = modules_set
self.types = types_set
self.procedures = procedures_set
self.variables = variables_set
self.constants = constants_set
#
# if __debug__:
# print 'exiting set_dialect'
# print ' self.dialect: ', self.dialect
# print ' self.lexemes_to_reject: ', self.lexemes_to_reject
# print ' self.reserved_words: ', self.reserved_words
# print ' self.builtins: ', self.builtins
# print ' self.pseudo_builtins: ', self.pseudo_builtins
# print ' self.adts: ', self.adts
# print ' self.modules: ', self.modules
# print ' self.types: ', self.types
# print ' self.procedures: ', self.procedures
# print ' self.variables: ', self.variables
# print ' self.types: ', self.types
# print ' self.constants: ', self.constants
# Extracts a dialect name from a dialect tag comment string and checks
# the extracted name against known dialects. If a match is found, the
# matching name is returned, otherwise dialect id 'unknown' is returned
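    # (illustrative: a tag like '(*!m2pim*)' yields 'm2pim', while an
    #  unrecognised tag like '(*!foo*)' yields 'unknown')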
def get_dialect_from_dialect_tag(self, dialect_tag):
#
# if __debug__:
# print 'entered get_dialect_from_dialect_tag with arg: ', dialect_tag
#
# constants
left_tag_delim = '(*!'
right_tag_delim = '*)'
left_tag_delim_len = len(left_tag_delim)
right_tag_delim_len = len(right_tag_delim)
indicator_start = left_tag_delim_len
indicator_end = -(right_tag_delim_len)
#
# check comment string for dialect indicator
if len(dialect_tag) > (left_tag_delim_len + right_tag_delim_len) \
and dialect_tag.startswith(left_tag_delim) \
and dialect_tag.endswith(right_tag_delim):
#
# if __debug__:
# print 'dialect tag found'
#
# extract dialect indicator
indicator = dialect_tag[indicator_start:indicator_end]
#
# if __debug__:
# print 'extracted: ', indicator
#
# check against known dialects
for index in range(1, len(self.dialects)):
#
# if __debug__:
# print 'dialects[', index, ']: ', self.dialects[index]
#
if indicator == self.dialects[index]:
#
# if __debug__:
# print 'matching dialect found'
#
# indicator matches known dialect
return indicator
else:
# indicator does not match any dialect
return 'unknown' # default
else:
# invalid indicator string
return 'unknown' # default
# intercept the token stream, modify token attributes and return them
def get_tokens_unprocessed(self, text):
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
#
# check for dialect tag if dialect has not been set by tag
if not self.dialect_set_by_tag and token == Comment.Special:
indicated_dialect = self.get_dialect_from_dialect_tag(value)
if indicated_dialect != 'unknown':
# token is a dialect indicator
# reset reserved words and builtins
self.set_dialect(indicated_dialect)
self.dialect_set_by_tag = True
#
# check for reserved words, predefined and stdlib identifiers
if token is Name:
if value in self.reserved_words:
token = Keyword.Reserved
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.builtins:
token = Name.Builtin
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.pseudo_builtins:
token = Name.Builtin.Pseudo
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.adts:
if not self.treat_stdlib_adts_as_builtins:
token = Name.Namespace
else:
token = Name.Builtin.Pseudo
if self.algol_publication_mode:
value = value.lower()
#
elif value in self.modules:
token = Name.Namespace
#
elif value in self.types:
token = Name.Class
#
elif value in self.procedures:
token = Name.Function
#
elif value in self.variables:
token = Name.Variable
#
elif value in self.constants:
token = Name.Constant
#
elif token in Number:
#
# mark prefix number literals as error for PIM and ISO dialects
if self.dialect not in ('unknown', 'm2r10', 'objm2'):
if "'" in value or value[0:2] in ('0b', '0x', '0u'):
token = Error
#
elif self.dialect in ('m2r10', 'objm2'):
# mark base-8 number literals as errors for M2 R10 and ObjM2
if token is Number.Oct:
token = Error
# mark suffix base-16 literals as errors for M2 R10 and ObjM2
elif token is Number.Hex and 'H' in value:
token = Error
# mark real numbers with E as errors for M2 R10 and ObjM2
elif token is Number.Float and 'E' in value:
token = Error
#
elif token in Comment:
#
# mark single line comment as error for PIM and ISO dialects
if token is Comment.Single:
if self.dialect not in ('unknown', 'm2r10', 'objm2'):
token = Error
#
if token is Comment.Preproc:
# mark ISO pragma as error for PIM dialects
if value.startswith('<*') and \
self.dialect.startswith('m2pim'):
token = Error
# mark PIM pragma as comment for other dialects
elif value.startswith('(*$') and \
self.dialect != 'unknown' and \
not self.dialect.startswith('m2pim'):
token = Comment.Multiline
#
else: # token is neither Name nor Comment
#
# mark lexemes matching the dialect's error token set as errors
if value in self.lexemes_to_reject:
token = Error
#
# substitute lexemes when in Algol mode
if self.algol_publication_mode:
if value == '#':
value = u'≠'
elif value == '<=':
value = u'≤'
elif value == '>=':
value = u'≥'
elif value == '==':
value = u'≡'
elif value == '*.':
value = u'•'
# return result
yield index, token, value
| mit |
LongSeanSilvr/DC_Metro_Tracker | development_version/src/general_intents.py | 1 | 1923 | import build_response as br
# ======================================================================================================================
# Skill Behavior: Welcome Response
# ======================================================================================================================
class Welcome(object):
def __init__(self):
self.card_title = "Welcome"
self.reprompt_text = "What station would you like train times for?"
self.flag = "welcome"
def build_response(self):
output = br.build_response(self.card_title, self.flag, reprompt_text=self.reprompt_text)
return output
# ======================================================================================================================
# Skill Intent: Help
# ======================================================================================================================
class Help(object):
def __init__(self, intent, session): # Parameters are here so handler can treat this like the other intent classes
self.card_title = "Help"
self.reprompt_text = "What station would you like train times for?"
self.flag = "help"
def build_response(self):
output = br.build_response(self.card_title, self.flag, reprompt_text=self.reprompt_text)
return output
# ======================================================================================================================
# Skill Intent: Quit
# ======================================================================================================================
class Exit(object):
def __init__(self, intent, session): # Parameters are here so handler can treat this like the other intent classes
self.card_title = "Exiting"
self.flag = "exit"
def build_response(self):
output = br.build_response(self.card_title, self.flag)
return output
| gpl-3.0 |
Sorsly/subtle | google-cloud-sdk/lib/googlecloudsdk/command_lib/ml/predict_utilities.py | 3 | 3913 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading instances for prediction."""
import json
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import resources
from googlecloudsdk.core.util import files
class InvalidInstancesFileError(core_exceptions.Error):
"""Indicates that the input file was invalid in some way."""
pass
def ReadInstances(input_file, data_format, limit=None):
"""Reads the instances from input file.
Args:
input_file: An open file-like object for the input file.
data_format: str, data format of the input file, 'json' or 'text'.
limit: int, the maximum number of instances allowed in the file
Returns:
A list of instances.
Raises:
InvalidInstancesFileError: If the input file is invalid (invalid format or
contains too many/zero instances).
"""
instances = []
for line_num, line in enumerate(input_file):
line_content = line.rstrip('\n')
if not line_content:
raise InvalidInstancesFileError('Empty line is not allowed in the '
'instances file.')
if limit and line_num >= limit:
raise InvalidInstancesFileError(
'Online prediction can process no more than ' + str(limit) +
' instances per file. Please use batch prediction instead.')
if data_format == 'json':
try:
instances.append(json.loads(line_content))
except ValueError:
raise InvalidInstancesFileError(
'Input instances are not in JSON format. '
'See "gcloud beta ml predict --help" for details.')
elif data_format == 'text':
instances.append(line_content)
if not instances:
raise InvalidInstancesFileError('No valid instance was found.')
return instances
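# Illustrative example (not part of the original module): reading two JSON
# instances from an in-memory file-like object.
#
#   import io
#   ReadInstances(io.StringIO(u'{"x": 1}\n{"x": 2}\n'), 'json', limit=100)
#   # -> [{u'x': 1}, {u'x': 2}]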
def ReadInstancesFromArgs(json_instances, text_instances, limit=None):
"""Reads the instances from the given file path ('-' for stdin).
Exactly one of json_instances, text_instances must be given.
Args:
json_instances: str or None, a path to a file ('-' for stdin) containing
instances in JSON format.
text_instances: str or None, a path to a file ('-' for stdin) containing
instances in text format.
limit: int, the maximum number of instances allowed in the file
Returns:
A list of instances.
Raises:
InvalidInstancesFileError: If the input file is invalid (invalid format or
contains too many/zero instances), or an improper combination of input
files was given.
"""
if (json_instances and text_instances or
not (json_instances or text_instances)):
raise InvalidInstancesFileError(
'Exactly one of --json-instances and --text-instances must be '
'specified.')
if json_instances:
data_format = 'json'
input_file = json_instances
elif text_instances:
data_format = 'text'
input_file = text_instances
with files.Open(input_file) as f:
return ReadInstances(f, data_format, limit=limit)
def ParseModelOrVersionRef(model_id, version_id):
if version_id:
return resources.REGISTRY.Parse(version_id,
collection='ml.projects.models.versions',
params={'modelsId': model_id})
else:
return resources.REGISTRY.Parse(model_id, collection='ml.projects.models')
| mit |
jaredculp/faker | faker/providers/address/es/__init__.py | 15 | 3305 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from .. import Provider as AddressProvider
class Provider(AddressProvider):
## List of Countries https://www.un.org/es/members/
countries = (
'Afganistán', 'Albania', 'Alemania', 'Andorra', 'Angola',
'Antigua y Barbuda', 'Arabia Saudita', 'Argelia', 'Argentina',
'Armenia', 'Australia', 'Austria', 'Azerbaiyán',
'Bahamas', 'Bahrein', 'Bangladesh', 'Barbados', 'Belarús',
'Bélgica', 'Belice', 'Benin', 'Bhután', 'Bolivia',
'Bosnia y Herzegovina', 'Botswana', 'Brasil', 'Brunei Darussalam',
'Bulgaria', 'Burkina Faso', 'Burundi', 'Cabo Verde', 'Camboya',
'Camerún', 'Canadá', 'Chad', 'Chile', 'China', 'Chipre','Colombia',
'Comoras', 'Congo', 'Costa Rica', 'Côte d\'Ivoire', 'Croacia',
'Cuba', 'Dinamarca', 'Djibouti', 'Dominicana', 'Ecuador', 'Egipto',
'El Salvador', 'Emiratos Árabes Unidos', 'Eritrea', 'Eslovaquia',
'Eslovenia', 'España', 'Estados Unidos de América', 'Estonia',
'Etiopía', 'ex República Yugoslava de Macedonia',
'Federación de Rusia', 'Fiji', 'Filipinas', 'Finlandia', 'Francia',
'Gabón', 'Gambia', 'Georgia', 'Ghana', 'Granada', 'Grecia',
'Guatemala', 'Guinea', 'Guinea Bissau', 'Guinea Ecuatorial',
'Guyana', 'Haití', 'Honduras', 'Hungría', 'India', 'Indonesia',
'Irán', 'Iraq', 'Irlanda', 'Islandia', 'Islas Marshall',
'Islas Salomón', 'Israel', 'Italia', 'Jamaica', 'Japón',
'Jordania', 'Kazajstán', 'Kenya', 'Kirguistán', 'Kiribati',
'Kuwait', 'Lesotho', 'Letonia', 'Líbano', 'Liberia', 'Libia',
'Liechtenstein', 'Lituania', 'Luxemburgo', 'Madagascar',
'Malasia', 'Malawi', 'Maldivas', 'Mali', 'Malta','Marruecos',
'Mauricio', 'Mauritania', 'México', 'Micronesia', 'Mónaco',
'Mongolia', 'Montenegro','Mozambique','Myanmar', 'Namibia',
'Nauru', 'Nicaragua', 'Niger', 'Nigeria', 'Noruega',
'Nueva Zelandia', 'Omán', 'Países Bajos', 'Pakistán', 'Palau',
'Panamá', 'Papua Nueva Guinea', 'Paraguay', 'Perú', 'Polonia',
'Portugal', 'Qatar',
'Reino Unido de Gran Bretaña e Irlanda del Norte',
'República Árabe Siria', 'República Centroafricana',
'República Checa', 'República de Corea', 'República de Moldova',
'República Democrática del Congo',
'República Democrática Popular Lao',
'República Dominicana',
'República Federal Democrática de Nepal',
'República Popular Democrática de Corea',
'República Unida de Tanzanía', 'Rumania', 'Rwanda',
'Saint Kitts y Nevis', 'Samoa', 'San Marino', 'Santa Lucía',
'Santo Tomé y Príncipe', 'San Vicente y las Granadinas',
'Senegal', 'Serbia', 'Seychelles', 'Sierra Leona', 'Singapur',
'Somalia', 'Sri Lanka', 'Sudáfrica', 'Sudán', 'Sudán del Sur',
'Suecia', 'Suiza', 'Suriname', 'Swazilandia', 'Tailandia',
'Tayikistán', 'Timor-Leste', 'Togo', 'Tonga', 'Trinidad y Tabago',
'Túnez', 'Turkmenistán', 'Turquía', 'Tuvalu', 'Ucrania', 'Uganda',
        'Uruguay', 'Uzbekistán', 'Vanuatu', 'Venezuela', 'Vietnam',
'Yemen', 'Zambia', 'Zimbabwe'
)
| mit |
GNOME/libgxps | regtest/TestReferences.py | 1 | 3535 | # TestReferences.py
#
# Copyright (C) 2011 Carlos Garcia Campos <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import errno
from Test import Test
from Config import Config
from Printer import get_printer
from Utils import get_document_paths_from_dir, get_skipped_tests
from Queue import Queue
from threading import Thread, RLock
class TestReferences:
def __init__(self, docsdir, refsdir):
self._docsdir = docsdir
self._refsdir = refsdir
self._skipped = get_skipped_tests(docsdir)
self._test = Test()
self.config = Config()
self.printer = get_printer()
self._total_tests = 1
self._n_tests = 0
self._queue = Queue()
self._lock = RLock()
try:
os.makedirs(self._refsdir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
except:
raise
def create_refs_for_file(self, filename):
if filename in self._skipped:
with self._lock:
self._n_tests += 1
self.printer.print_default("Skipping test '%s'" % (os.path.join(self._docsdir, filename)))
return
refs_path = os.path.join(self._refsdir, filename)
try:
os.makedirs(refs_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
except:
raise
doc_path = os.path.join(self._docsdir, filename)
if not self.config.force and self._test.has_results(refs_path):
with self._lock:
self._n_tests += 1
self.printer.print_default("Results found, skipping '%s'" % doc_path)
return
if self._test.create_refs(doc_path, refs_path):
self._test.create_checksums(refs_path, self.config.checksums_only)
with self._lock:
self._n_tests += 1
self.printer.printout_ln("[%d/%d] %s: done" % (self._n_tests, self._total_tests, doc_path))
def _worker_thread(self):
while True:
doc = self._queue.get()
self.create_refs_for_file(doc)
self._queue.task_done()
def create_refs(self):
docs, total_docs = get_document_paths_from_dir(self._docsdir)
self._total_tests = total_docs
self.printer.printout_ln('Found %d documents' % (total_docs))
self.printer.printout_ln('Process %d using %d worker threads' % (os.getpid(), self.config.threads))
self.printer.printout_ln()
self.printer.printout('Spawning %d workers...' % (self.config.threads))
for n_thread in range(self.config.threads):
thread = Thread(target=self._worker_thread)
thread.daemon = True
thread.start()
for doc in docs:
self._queue.put(doc)
self._queue.join()
| lgpl-2.1 |
HyechurnJang/archon | archon/view/core.py | 2 | 10791 | # -*- coding: utf-8 -*-
################################################################################
# _____ _ _____ _ #
# / ____(_) / ____| | | #
# | | _ ___ ___ ___ | (___ _ _ ___| |_ ___ _ __ ___ ___ #
# | | | / __|/ __/ _ \ \___ \| | | / __| __/ _ \ '_ ` _ \/ __| #
# | |____| \__ \ (_| (_) | ____) | |_| \__ \ || __/ | | | | \__ \ #
# \_____|_|___/\___\___/ |_____/ \__, |___/\__\___|_| |_| |_|___/ #
# __/ | #
# |___/ #
# _ __ _____ _ _____ ______ #
# | |/ / / ____| | |/ ____| ____| #
# | ' / ___ _ __ ___ __ _ | (___ ___ | | (___ | |__ #
# | < / _ \| '__/ _ \/ _` | \___ \ / _ \| |\___ \| __| #
# | . \ (_) | | | __/ (_| | ____) | (_) | |____) | |____ #
# |_|\_\___/|_| \___|\__,_| |_____/ \___/|_|_____/|______| #
# #
################################################################################
# #
# Copyright (c) 2016 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
import re
import uuid
class TAG(dict):
@classmethod
def ATTR(cls, attrs, **sets):
for k in sets: attrs[k] = '%s %s' % (sets[k], attrs[k]) if k in attrs else sets[k]
return attrs
@classmethod
def UUID(cls):
return 'V-' + str(uuid.uuid4())
def __init__(self, tag, **attrs):
dict.__init__(self, tag=tag, elems=[], attrs=attrs)
def __len__(self, *args, **kwargs):
return self['elems'].__len__()
def __str__(self):
return self.render()
def click(self, url):
if 'CLASS' in self['attrs']: self['attrs']['CLASS'] += ' clickable'
else: self['attrs']['CLASS'] = 'clickable'
self['attrs']['onclick'] = "GetData('%s');" % url
return self
def html(self, *elems):
for elem in elems: self['elems'].append(elem)
return self
def empty(self):
return not self['elems'].__len__()
def render(self):
tag = self['tag']
attrs = self['attrs']
elems = self['elems']
attr_str = '';
for k in attrs: attr_str += ' %s="%s"' % (k, attrs[k])
elem_str = ''
for elem in elems: elem_str += str(elem)
return '<%s%s>%s</%s>' % (tag, attr_str, elem_str, tag)
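# Illustrative render (a single attribute, so key ordering is unambiguous):
#   str(TAG('div', CLASS='box').html('hi'))  ->  '<div CLASS="box">hi</div>'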
class DIV(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'div', **attrs)
class SPAN(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'span', **attrs)
class HEAD(TAG):
def __init__(self, level, **attrs): TAG.__init__(self, 'h' + str(level), **attrs)
class PARA(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'p', **TAG.ATTR(attrs, CLASS='para'))
class ANCH(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'a', **attrs)
class LABEL(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'label', **attrs)
class STRONG(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'strong', **attrs)
class SMALL(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'small', **attrs)
class IMG(TAG):
def __init__(self, src, **attrs): TAG.__init__(self, 'img', **TAG.ATTR(attrs, src=src))
class ICON(TAG):
def __init__(self, icon, **attrs): TAG.__init__(self, 'i', **TAG.ATTR(attrs, CLASS='fa fa-%s' % icon))
class THEAD(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'thead', **attrs)
class TBODY(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'tbody', **attrs)
class TH(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'th', **attrs)
class TR(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'tr', **attrs)
class TD(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'td', **attrs)
class TABLE(TAG):
class BASIC(TAG):
def __init__(self, *heads, **options):
TAG.__init__(self, 'TABLE', ID=TAG.UUID(), CLASS='table table-bordered table-hover', LIB='table_basic', **{'width':'100%'})
self.body = TBODY()
self['options'] = options
tr = TR()
order = [None for i in range(0, len(heads))]
for i in range(0, len(heads)):
head = heads[i]
kv = re.match('.+\<(?P<p>\d+)(?P<d>(\+|\-))\>$', head)
if kv:
p = int(kv.group('p'))
d = kv.group('d')
if d == '+': order[p] = [i, 'asc']
else: order[p] = [i, 'desc']
head = head.replace('<%d%s>' % (p, d), '')
tr.html(TH().html(head))
order = filter(None, order)
if order: self['options']['order'] = order
else: self['options']['order'] = [[0, 'asc']]
self.html(THEAD().html(tr)).html(self.body)
def Record(self, *cols, **attrs):
tr = TR(**attrs)
for col in cols: tr.html(TD().html(col))
self.body.html(tr)
return self
def __len__(self, *args, **kwargs):
return self.body.__len__()
class ASYNC(TAG):
@classmethod
def pageview(cls):
def wrapper(view):
def decofunc(r, m, v):
r.Draw = int(r.Query['draw'][0]) if isinstance(r.Query['draw'], list) else int(r.Query['draw'])
r.Length = int(r.Query['length'][0]) if isinstance(r.Query['length'], list) else int(r.Query['length'])
r.Start = int(r.Query['start'][0]) if isinstance(r.Query['start'], list) else int(r.Query['start'])
try:
r.OrderCol = int(r.Query['order[0][column]'][0]) if isinstance(r.Query['order[0][column]'], list) else int(r.Query['order[0][column]'])
                        # 'dir' and 'search[value]' are strings; coercing them to int
                        # would always raise and silently drop them, so keep them as-is
                        r.OrderDir = r.Query['order[0][dir]'][0] if isinstance(r.Query['order[0][dir]'], list) else r.Query['order[0][dir]']
                        r.Search = r.Query['search[value]'][0] if isinstance(r.Query['search[value]'], list) else r.Query['search[value]']
except: pass
r.Page = r.Start / r.Length
return view(r, m, v)
return decofunc
return wrapper
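        # Illustrative use (view and variable names assumed): parse the DataTables
        # paging fields onto the request before an async table view runs.
        #
        #   @TABLE.ASYNC.pageview()
        #   def game_list(r, m, v):
        #       ...
        #       return TABLE.ASYNCDATA(r.Draw, total, count)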
def __init__(self, url, *heads, **attrs):
TAG.__init__(self, 'TABLE', **TAG.ATTR(attrs, ID=TAG.UUID(), CLASS='table table-bordered table-hover', LIB='table_async', **{'width':'100%', 'url':url}))
tr = TR()
for head in heads: tr.html(TH().html(head))
self.html(THEAD().html(tr))
class ASYNCDATA(dict):
def __init__(self, draw, total, count):
dict.__init__(self, draw=draw, recordsTotal=total, recordsFiltered=count)
self.data = []
self['data'] = self.data
def Record(self, *cols, **attrs):
self.data.append([str(col) for col in cols])
return self
class FLIP(TAG):
def __init__(self, *heads, **attrs):
TAG.__init__(self, 'TABLE', **TAG.ATTR(attrs, ID=TAG.UUID(), CLASS='table', LIB='table_flip', **{'data-show-toggle':'true', 'data-paging':'true', 'width':'100%'}))
self.body = TBODY()
tr = TR()
for head in heads:
if '+' in head: tr.html(TH(**{'data-type':'html', 'data-breakpoints':'all', 'data-title':head.replace('+', '')}).html(head))
else: tr.html(TH(**{'data-type':'html'}).html(head))
self.html(THEAD().html(tr)).html(self.body)
def Record(self, *cols, **attrs):
tr = TR(**attrs)
for col in cols: tr.html(TD(**{'data-type':'html'}).html(col))
self.body.html(tr)
return self
def __len__(self, *args, **kwargs):
return self.body.__len__()
def __init__(self, **attrs): TAG.__init__(self, 'table', **attrs)
class UL(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'ul', **attrs)
class LI(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'li', **attrs)
class FORM(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'form', **attrs)
class INPUT(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'input', **attrs)
class SELECT(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'select', **attrs)
class OPTION(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'option', **attrs)
class BUTTON(TAG):
def __init__(self, **attrs): TAG.__init__(self, 'button', **TAG.ATTR(attrs, CLASS='btn', TYPE='button'))
| apache-2.0 |
chouseknecht/ansible | test/units/modules/network/f5/test_bigip_firewall_dos_profile.py | 22 | 3200 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_firewall_dos_profile import ModuleParameters
from library.modules.bigip_firewall_dos_profile import ModuleManager
from library.modules.bigip_firewall_dos_profile import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_firewall_dos_profile import ModuleParameters
from ansible.modules.network.f5.bigip_firewall_dos_profile import ModuleManager
from ansible.modules.network.f5.bigip_firewall_dos_profile import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
description='my description',
threshold_sensitivity='low',
default_whitelist='whitelist1'
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.description == 'my description'
assert p.threshold_sensitivity == 'low'
assert p.default_whitelist == '/Common/whitelist1'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
set_module_args(dict(
name='foo',
description='this is a description',
threshold_sensitivity='low',
default_whitelist='whitelist1',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['description'] == 'this is a description'
| gpl-3.0 |
dakrauth/picker | picker/forms.py | 1 | 6144 | from django import forms
from django.utils import timezone
from django.utils.module_loading import import_string
from . import models as picker
from . import utils
_picker_widget = None
encoded_game_key = 'game_{}'.format
TIE_KEY = '__TIE__'
def decoded_game_key(value):
return int(value.replace('game_', ''))
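# (illustrative round trip: encoded_game_key(42) -> 'game_42';
#  decoded_game_key('game_42') -> 42)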
def encoded_game_item(game):
return (
encoded_game_key(game.id),
str(game.winner.id) if game.winner else (TIE_KEY if game.is_tie else '')
)
def get_picker_widget(league):
global _picker_widget
if not _picker_widget:
widget_path = league.config('TEAM_PICKER_WIDGET')
if widget_path:
_picker_widget = import_string(widget_path)
_picker_widget = _picker_widget or forms.RadioSelect
return _picker_widget
class GameField(forms.ChoiceField):
def __init__(self, game, manage=False, widget=None):
choices = [(str(game.away.id), game.away), (str(game.home.id), game.home)]
if manage:
choices.insert(1, (TIE_KEY, ''))
self.game = game
self.manage = manage
self.game_id = game.id
self.is_game = True
super(GameField, self).__init__(
choices=choices,
label=game.start_time.strftime('%a, %b %d %I:%M %p'),
required=False,
help_text=game.tv,
disabled=not self.manage and (self.game.start_time <= timezone.now()),
widget=widget or get_picker_widget(game.gameset.league)
)
class FieldIter:
def __init__(self, form):
self.fields = []
self.form = form
def append(self, name):
self.fields.append(name)
def __iter__(self):
for name in self.fields:
yield self.form[name]
class BasePickForm(forms.Form):
management = False
def __init__(self, gameset, *args, **kws):
super(BasePickForm, self).__init__(*args, **kws)
self.gameset = gameset
self.game_fields = FieldIter(self)
games = list(gameset.games.select_related('home__league', 'away__league'))
if games:
for gm in games:
key = encoded_game_key(gm.id)
self.fields[key] = GameField(gm, self.management)
self.game_fields.append(key)
self.fields['points'] = forms.IntegerField(
label='{}:'.format(games[-1].vs_description),
required=False
)
class ManagementPickForm(BasePickForm):
management = True
def __init__(self, gameset, *args, **kws):
kws.setdefault('initial', {}).update(**self.get_initial_picks(gameset))
super(ManagementPickForm, self).__init__(gameset, *args, **kws)
def save(self):
gameset = self.gameset
data = self.cleaned_data.copy()
gameset.points = data.pop('points', 0) or 0
gameset.save()
for key, winner in data.items():
if winner:
pk = decoded_game_key(key)
game = gameset.games.get(pk=pk)
game.winner = None if winner == TIE_KEY else int(winner)
gameset.update_pick_status()
@staticmethod
def get_initial_picks(gameset):
return dict({
encoded_game_key(game.id): str(game.winner.id)
for game in gameset.games.played()
if game.winner
}, points=gameset.points)
class UserPickForm(BasePickForm):
def __init__(self, user, gameset, *args, **kws):
initial = self.get_initial_user_picks(gameset, user)
kws.setdefault('initial', {}).update(initial)
self.user = user
super(UserPickForm, self).__init__(gameset, *args, **kws)
def save(self):
data = self.cleaned_data.copy()
picks = picker.PickSet.objects.for_gameset_user(self.gameset, self.user)
points = data.pop('points', None)
games = {decoded_game_key(k): v for k, v in data.items() if v}
picks.update_picks(games=games, points=points)
return picks
@staticmethod
def get_initial_user_picks(gameset, user):
ps = gameset.pick_for_user(user)
initial = dict({
encoded_game_key(g_id): str(w_id) for g_id, w_id in ps.gamepicks.picked_winner_ids()
}, points=ps.points) if ps else {}
return initial
class GameForm(forms.ModelForm):
class Meta:
model = picker.Game
fields = ('start_time', 'location')
class PreferenceForm(forms.ModelForm):
class Meta:
model = picker.Preference
fields = ('autopick',)
def __init__(self, instance, *args, **kws):
kws['instance'] = instance
self.current_email = instance.user.email.lower()
kws.setdefault('initial', {})['email'] = self.current_email
super(PreferenceForm, self).__init__(*args, **kws)
for league in picker.League.objects.all():
field_name = '{}_favorite'.format(league.slug)
current = None
if instance:
try:
current = picker.PickerFavorite.objects.get(user=instance.user, league=league)
except picker.PickerFavorite.DoesNotExist:
pass
self.fields[field_name] = forms.ModelChoiceField(
picker.Team.objects.filter(league=league),
label='{} Favorite'.format(league.abbr.upper()),
empty_label='-- Select --',
required=False,
initial=current.team if current else None
)
def save(self, commit=True):
super(PreferenceForm, self).save(commit)
if commit:
picker.PickerFavorite.objects.filter(user=self.instance.user).delete()
for key in self.cleaned_data:
if not key.endswith('_favorite'):
continue
slug = key.rsplit('_')[0]
league = picker.League.objects.get(slug=slug)
picker.PickerFavorite.objects.create(
league=league,
user=self.instance.user,
team=self.cleaned_data[key]
)
| mit |
waytai/odoo | addons/crm/report/report_businessopp.py | 377 | 6269 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os, time
import random
import StringIO
from openerp.report.render import render
from openerp.report.interface import report_int
from pychart import *
theme.use_color = 1
class external_pdf(render):
""" Generate External PDF """
def __init__(self, pdf):
render.__init__(self)
self.pdf = pdf
self.output_type = 'pdf'
def _render(self):
return self.pdf
class report_custom(report_int):
""" Create Custom Report """
def create(self, cr, uid, ids, datas, context=None):
""" @param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of IDs
@param context: A standard dictionary for contextual values """
assert len(ids), 'You should provide some ids!'
responsible_data = {}
responsible_names = {}
data = []
minbenef = 999999999999999999999
maxbenef = 0
cr.execute('select probability, planned_revenue, planned_cost, user_id,\
res_users.name as name from crm_case left join res_users on \
(crm_case.user_id=res_users.id) where crm_case.id IN %s order by user_id',(tuple(ids),))
res = cr.dictfetchall()
for row in res:
            proba = (row['probability'] or 0) / 100.0
cost = row['planned_cost'] or 0
revenue = row['planned_revenue'] or 0
userid = row['user_id'] or 0
benefit = revenue - cost
if benefit > maxbenef:
maxbenef = benefit
if benefit < minbenef:
minbenef = benefit
tuple_benefit = (proba * 100, benefit)
responsible_data.setdefault(userid, [])
responsible_data[userid].append(tuple_benefit)
tuple_benefit = (proba * 100, cost, benefit)
data.append(tuple_benefit)
responsible_names[userid] = (row['name'] or '/').replace('/','//')
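        # Pad the benefit range, then (in the loop below) pull each point toward
        # the vertical middle of the range in proportion to its probability, so
        # high-probability points fit inside the converging "pipeline" lines.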
minbenef -= maxbenef * 0.05
maxbenef *= 1.2
ratio = 0.5
minmaxdiff2 = (maxbenef - minbenef)/2
for l in responsible_data.itervalues():
for i in range(len(l)):
percent, benef = l[i]
proba = percent/100
current_ratio = 1 + (ratio-1) * proba
newbenef = minmaxdiff2 + ((benef - minbenef - minmaxdiff2) * current_ratio)
l[i] = (percent, newbenef)
#TODO:
#-group by "categorie de probabilites ds graphe du haut"
#-echelle variable
pdf_string = StringIO.StringIO()
can = canvas.init(fname = pdf_string, format = 'pdf')
chart_object.set_defaults(line_plot.T, line_style=None)
xaxis = axis.X(label=None, format="%d%%", tic_interval=20)
yaxis = axis.Y()
x_range_a, x_range_b = (0, 100)
y_range_a, y_range_b = (minbenef, maxbenef)
if y_range_a == 0.0:
y_range_a += 0.0001
ar = area.T(
size = (300,200),
y_grid_interval = 10000,
y_grid_style = None,
x_range = (x_range_a, x_range_b),
y_range = (y_range_a, y_range_b),
x_axis = xaxis,
y_axis = None,
legend = legend.T()
)
#import pydb; pydb.debugger()
for k, d in responsible_data.iteritems():
fill = fill_style.Plain(bgcolor=color.T(r=random.random(), g=random.random(), b=random.random()))
tick = tick_mark.Square(size=6, fill_style=fill)
ar.add_plot(line_plot.T(label=responsible_names[k], data=d, tick_mark=tick))
ar.draw(can)
# second graph (top right)
ar = area.T(legend = legend.T(),
size = (200,100),
loc = (100,250),
x_grid_interval = lambda min, max: [40,60,80,100],
x_grid_style = line_style.gray70_dash1,
x_range = (33, 100),
x_axis = axis.X(label=None, minor_tic_interval = lambda min,max: [50, 70, 90],\
format=lambda x: ""),
y_axis = axis.Y(label="Planned amounts"))
bar_plot.fill_styles.reset();
plot1 = bar_plot.T(label="Cost", data=data, fill_style=fill_style.red)
plot2 = bar_plot.T(label="Revenue", data=data, hcol=2, stack_on = plot1, fill_style=fill_style.blue)
ar.add_plot(plot1, plot2)
ar.draw(can)
# diagonal "pipeline" lines
can.line(line_style.black, 0, 200, 300, 150)
can.line(line_style.black, 0, 0, 300, 50)
# vertical lines
ls = line_style.T(width=0.4, color=color.gray70, dash=(2, 2))
for x in range(120, 300, 60):
can.line(ls, x, 0, x, 250)
# draw arrows to the right
a = arrow.fat1
for y in range(60, 150, 10):
a.draw([(285, y), (315, y)], can=can)
# close canvas so that the file is written to "disk"
can.close()
self.obj = external_pdf(pdf_string.getvalue())
self.obj.render()
pdf_string.close()
return (self.obj.pdf, 'pdf')
report_custom('report.crm.case')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MyRobotLab/pyrobotlab | home/pedrosenarego/zorba/gestures/addknowledge.py | 3 | 1333 | import os
import sys
import fileinput
import os.path
def addKnowledge(category,pattern):
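  # Illustrative call (the hard-coded bot paths used below must exist):
  #   addKnowledge('ZORBA IS', 'my robot')  ->  stores template "ZORBA IS your robot"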
  #### change some things to make sense ############
pattern = pattern.replace('my', 'your')
#### Clean the ending </aiml>############
for line in fileinput.input('/home/pedro/myrobotLab/myrobotLab-1.0.1461/develop/ProgramAB/bots/zorba/aiml/aknowledge.aiml', inplace=1):
sys.stdout.write(line.replace('</aiml>', ''))
#######add the new sentence to aiml############
text_file = open("/home/pedro/myrobotLab/myrobotLab-1.0.1461/develop/ProgramAB/bots/zorba/aiml/aknowledge.aiml", "a")
TotalAmount = '<category><pattern>'+str(category)+'</pattern><template>'+str(category)+' '+str(pattern)+'</template></category>\n</aiml>'
text_file.write("%s" % TotalAmount)
text_file.close()
##### Clean if repeated in the set ############
#for line in fileinput.input('/home/pedro/myrobotLab/myrobotLab-1.0.1461/develop/ProgramAB/bots/zorba/sets/knowledge.txt', inplace=1):
#sys.stdout.write(line.replace(str(category), ''))
#######add the new sentence to knowledge.txt############
text_file = open("/home/pedro/myrobotLab/myrobotLab-1.0.1461/develop/ProgramAB/bots/zorba/sets/knowledge.txt", "a")
TotalAmount = str(category)
text_file.write("%s\n" % TotalAmount)
text_file.close() | apache-2.0 |
ch33kybutt/kernel_skipjack_tuna | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device behavior.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only things related to the specified device
# debug: work with debug mode. It shows buffer status.
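#
# e.g. (one possible invocation via the stock perf wrappers; exact usage may differ):
#   perf script record netdev-times
#   perf script report netdev-times dev=eth0 rx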
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
			# and a list which stacks receive events
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which are freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
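# (illustrative: diff_msec(1000000, 4000000) == 3.0)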
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
pigeonflight/strider-plone | docker/appengine/lib/django-1.2/tests/modeltests/aggregation/tests.py | 46 | 20519 | import datetime
from decimal import Decimal
from django.db.models import Avg, Sum, Count, Max, Min
from django.test import TestCase, Approximate
from models import Author, Publisher, Book, Store
class BaseAggregateTestCase(TestCase):
fixtures = ["initial_data.json"]
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEquals(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=1)
self.assertEqual(
b.name,
u'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
(u'Artificial Intelligence: A Modern Approach', 51.5),
(u'Practical Django Projects', 29.0),
(u'Python Web Development with Django', Approximate(30.3, places=1)),
(u'Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
(u'Artificial Intelligence: A Modern Approach', 2),
(u'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
(u'Practical Django Projects', 1),
(u'Python Web Development with Django', 3),
(u'Sams Teach Yourself Django in 24 Hours', 1),
(u'The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
(u'Adrian Holovaty', 4.5),
(u'Brad Dayley', 3.0),
(u'Jacob Kaplan-Moss', 4.5),
(u'James Bennett', 4.0),
(u'Paul Bissex', 4.0),
(u'Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
(u'Adrian Holovaty', 1),
(u'Brad Dayley', 1),
(u'Jacob Kaplan-Moss', 1),
(u'James Bennett', 1),
(u'Jeffrey Forcier', 1),
(u'Paul Bissex', 1),
(u'Peter Norvig', 2),
(u'Stuart Russell', 1),
(u'Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
(u'Artificial Intelligence: A Modern Approach', 7),
(u'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
(u'Practical Django Projects', 3),
(u'Python Web Development with Django', 7),
(u'Sams Teach Yourself Django in 24 Hours', 1),
(u'The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
(u'Apress', Decimal("59.69")),
(u"Jonno's House of Books", None),
(u'Morgan Kaufmann', Decimal("75.00")),
(u'Prentice Hall', Decimal("112.49")),
(u'Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=1).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
(u'Adrian Holovaty', 32.0),
(u'Brad Dayley', None),
(u'Jacob Kaplan-Moss', 29.5),
(u'James Bennett', 34.0),
(u'Jeffrey Forcier', 27.0),
(u'Paul Bissex', 31.0),
(u'Peter Norvig', 46.0),
(u'Stuart Russell', 57.0),
(u'Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=1).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__ge=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(earliest_book=Min("book__pubdate")).exclude(earliest_book=None).order_by("earliest_book").values()
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': u'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': u'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': u'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': u'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
| mit |
mims2707/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/atom/core.py | 80 | 20759 | #!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = '[email protected] (Jeff Scudder)'
import inspect
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from xml.dom.minidom import parseString as xmlString
except ImportError:
xmlString = None
STRING_ENCODING = 'utf-8'
class XmlElement(object):
"""Represents an element node in an XML document.
The text member is a UTF-8 encoded str or unicode.
"""
_qname = None
_other_elements = None
_other_attributes = None
# The rule set contains mappings for XML qnames to child members and the
# appropriate member classes.
_rule_set = None
_members = None
text = None
def __init__(self, text=None, *args, **kwargs):
if ('_members' not in self.__class__.__dict__
or self.__class__._members is None):
self.__class__._members = tuple(self.__class__._list_xml_members())
for member_name, member_type in self.__class__._members:
if member_name in kwargs:
setattr(self, member_name, kwargs[member_name])
else:
if isinstance(member_type, list):
setattr(self, member_name, [])
else:
setattr(self, member_name, None)
self._other_elements = []
self._other_attributes = {}
if text is not None:
self.text = text
def _list_xml_members(cls):
"""Generator listing all members which are XML elements or attributes.
The following members would be considered XML members:
foo = 'abc' - indicates an XML attribute with the qname abc
foo = SomeElement - indicates an XML child element
foo = [AnElement] - indicates a repeating XML child element, each instance
will be stored in a list in this member
foo = ('att1', '{http://example.com/namespace}att2') - indicates an XML
attribute which has different parsing rules in different versions of
the protocol. Version 1 of the XML parsing rules will look for an
      attribute with the qname 'att1' but version 2 of the parsing rules will
look for a namespaced attribute with the local name of 'att2' and an
XML namespace of 'http://example.com/namespace'.
"""
members = []
for pair in inspect.getmembers(cls):
if not pair[0].startswith('_') and pair[0] != 'text':
member_type = pair[1]
if (isinstance(member_type, tuple) or isinstance(member_type, list)
or isinstance(member_type, (str, unicode))
or (inspect.isclass(member_type)
and issubclass(member_type, XmlElement))):
members.append(pair)
return members
_list_xml_members = classmethod(_list_xml_members)
def _get_rules(cls, version):
"""Initializes the _rule_set for the class which is used when parsing XML.
This method is used internally for parsing and generating XML for an
XmlElement. It is not recommended that you call this method directly.
Returns:
A tuple containing the XML parsing rules for the appropriate version.
The tuple looks like:
(qname, {sub_element_qname: (member_name, member_class, repeating), ..},
{attribute_qname: member_name})
To give a couple of concrete example, the atom.data.Control _get_rules
with version of 2 will return:
('{http://www.w3.org/2007/app}control',
{'{http://www.w3.org/2007/app}draft': ('draft',
<class 'atom.data.Draft'>,
False)},
{})
Calling _get_rules with version 1 on gdata.data.FeedLink will produce:
('{http://schemas.google.com/g/2005}feedLink',
{'{http://www.w3.org/2005/Atom}feed': ('feed',
<class 'gdata.data.GDFeed'>,
False)},
{'href': 'href', 'readOnly': 'read_only', 'countHint': 'count_hint',
'rel': 'rel'})
"""
# Initialize the _rule_set to make sure there is a slot available to store
# the parsing rules for this version of the XML schema.
# Look for rule set in the class __dict__ proxy so that only the
# _rule_set for this class will be found. By using the dict proxy
# we avoid finding rule_sets defined in superclasses.
# The four lines below provide support for any number of versions, but it
    # runs a bit slower than hard coding slots for two versions, so I'm using
# the below two lines.
#if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
# cls._rule_set = []
#while len(cls.__dict__['_rule_set']) < version:
# cls._rule_set.append(None)
# If there is no rule set cache in the class, provide slots for two XML
# versions. If and when there is a version 3, this list will need to be
# expanded.
if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
cls._rule_set = [None, None]
# If a version higher than 2 is requested, fall back to version 2 because
# 2 is currently the highest supported version.
if version > 2:
return cls._get_rules(2)
# Check the dict proxy for the rule set to avoid finding any rule sets
# which belong to the superclass. We only want rule sets for this class.
if cls._rule_set[version-1] is None:
# The rule set for each version consists of the qname for this element
# ('{namespace}tag'), a dictionary (elements) for looking up the
# corresponding class member when given a child element's qname, and a
# dictionary (attributes) for looking up the corresponding class member
# when given an XML attribute's qname.
elements = {}
attributes = {}
if ('_members' not in cls.__dict__ or cls._members is None):
cls._members = tuple(cls._list_xml_members())
for member_name, target in cls._members:
if isinstance(target, list):
# This member points to a repeating element.
elements[_get_qname(target[0], version)] = (member_name, target[0],
True)
elif isinstance(target, tuple):
# This member points to a versioned XML attribute.
if version <= len(target):
attributes[target[version-1]] = member_name
else:
attributes[target[-1]] = member_name
elif isinstance(target, (str, unicode)):
# This member points to an XML attribute.
attributes[target] = member_name
elif issubclass(target, XmlElement):
        # This member points to a single occurrence element.
elements[_get_qname(target, version)] = (member_name, target, False)
version_rules = (_get_qname(cls, version), elements, attributes)
cls._rule_set[version-1] = version_rules
return version_rules
else:
return cls._rule_set[version-1]
_get_rules = classmethod(_get_rules)
def get_elements(self, tag=None, namespace=None, version=1):
"""Find all sub elements which match the tag and namespace.
To find all elements in this object, call get_elements with the tag and
namespace both set to None (the default). This method searches through
the object's members and the elements stored in _other_elements which
did not match any of the XML parsing rules for this class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching elements.
Returns:
A list of the matching XmlElements.
"""
matches = []
ignored1, elements, ignored2 = self.__class__._get_rules(version)
if elements:
for qname, element_def in elements.iteritems():
member = getattr(self, element_def[0])
if member:
if _qname_matches(tag, namespace, qname):
if element_def[2]:
# If this is a repeating element, copy all instances into the
# result list.
matches.extend(member)
else:
matches.append(member)
for element in self._other_elements:
if _qname_matches(tag, namespace, element._qname):
matches.append(element)
return matches
GetElements = get_elements
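  # For example (a sketch, with a hypothetical 'feed' object):
  #   feed.get_elements('entry', 'http://www.w3.org/2005/Atom')
  # returns the matching children whether they were mapped to class members
  # or kept in _other_elements.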
# FindExtensions and FindChildren are provided for backwards compatibility
# to the atom.AtomBase class.
# However, FindExtensions may return more results than the v1 atom.AtomBase
# method does, because get_elements searches both the expected children
# and the unexpected "other elements". The old AtomBase.FindExtensions
# method searched only "other elements" AKA extension_elements.
FindExtensions = get_elements
FindChildren = get_elements
def get_attributes(self, tag=None, namespace=None, version=1):
"""Find all attributes which match the tag and namespace.
To find all attributes in this object, call get_attributes with the tag
and namespace both set to None (the default). This method searches
through the object's members and the attributes stored in
_other_attributes which did not fit any of the XML parsing rules for this
class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching attributes.
Returns:
A list of XmlAttribute objects for the matching attributes.
"""
matches = []
ignored1, ignored2, attributes = self.__class__._get_rules(version)
if attributes:
for qname, attribute_def in attributes.iteritems():
if isinstance(attribute_def, (list, tuple)):
attribute_def = attribute_def[0]
member = getattr(self, attribute_def)
# TODO: ensure this hasn't broken existing behavior.
#member = getattr(self, attribute_def[0])
if member:
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, member))
for qname, value in self._other_attributes.iteritems():
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, value))
return matches
GetAttributes = get_attributes
def _harvest_tree(self, tree, version=1):
"""Populates object members from the data in the tree Element."""
qname, elements, attributes = self.__class__._get_rules(version)
for element in tree:
if elements and element.tag in elements:
definition = elements[element.tag]
# If this is a repeating element, make sure the member is set to a
# list.
if definition[2]:
if getattr(self, definition[0]) is None:
setattr(self, definition[0], [])
getattr(self, definition[0]).append(_xml_element_from_tree(element,
definition[1], version))
else:
setattr(self, definition[0], _xml_element_from_tree(element,
definition[1], version))
else:
self._other_elements.append(_xml_element_from_tree(element, XmlElement,
version))
for attrib, value in tree.attrib.iteritems():
if attributes and attrib in attributes:
setattr(self, attributes[attrib], value)
else:
self._other_attributes[attrib] = value
if tree.text:
self.text = tree.text
def _to_tree(self, version=1, encoding=None):
new_tree = ElementTree.Element(_get_qname(self, version))
self._attach_members(new_tree, version, encoding)
return new_tree
def _attach_members(self, tree, version=1, encoding=None):
"""Convert members to XML elements/attributes and add them to the tree.
Args:
tree: An ElementTree.Element which will be modified. The members of
this object will be added as child elements or attributes
according to the rules described in _expected_elements and
_expected_attributes. The elements and attributes stored in
            other_attributes and other_elements are also added as children
of this tree.
      version: int Ignored in this method but used by VersionedElement.
encoding: str (optional)
"""
qname, elements, attributes = self.__class__._get_rules(version)
encoding = encoding or STRING_ENCODING
# Add the expected elements and attributes to the tree.
if elements:
for tag, element_def in elements.iteritems():
member = getattr(self, element_def[0])
# If this is a repeating element and there are members in the list.
if member and element_def[2]:
for instance in member:
instance._become_child(tree, version)
elif member:
member._become_child(tree, version)
if attributes:
for attribute_tag, member_name in attributes.iteritems():
value = getattr(self, member_name)
if value:
tree.attrib[attribute_tag] = value
# Add the unexpected (other) elements and attributes to the tree.
for element in self._other_elements:
element._become_child(tree, version)
for key, value in self._other_attributes.iteritems():
# I'm not sure if unicode can be used in the attribute name, so for now
# we assume the encoding is correct for the attribute name.
if not isinstance(value, unicode):
value = value.decode(encoding)
tree.attrib[key] = value
if self.text:
if isinstance(self.text, unicode):
tree.text = self.text
else:
tree.text = self.text.decode(encoding)
def to_string(self, version=1, encoding=None, pretty_print=None):
"""Converts this object to XML."""
tree_string = ElementTree.tostring(self._to_tree(version, encoding))
if pretty_print and xmlString is not None:
return xmlString(tree_string).toprettyxml()
return tree_string
ToString = to_string
def __str__(self):
return self.to_string()
def _become_child(self, tree, version=1):
"""Adds a child element to tree with the XML data in self."""
new_child = ElementTree.Element('')
tree.append(new_child)
new_child.tag = _get_qname(self, version)
self._attach_members(new_child, version)
def __get_extension_elements(self):
return self._other_elements
def __set_extension_elements(self, elements):
self._other_elements = elements
extension_elements = property(__get_extension_elements,
__set_extension_elements,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def __get_extension_attributes(self):
return self._other_attributes
def __set_extension_attributes(self, attributes):
self._other_attributes = attributes
extension_attributes = property(__get_extension_attributes,
__set_extension_attributes,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def _get_tag(self, version=1):
qname = _get_qname(self, version)
if qname:
return qname[qname.find('}')+1:]
return None
def _get_namespace(self, version=1):
qname = _get_qname(self, version)
if qname.startswith('{'):
return qname[1:qname.find('}')]
else:
return None
def _set_tag(self, tag):
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if self._qname[0].startswith('{'):
self._qname[0] = '{%s}%s' % (self._get_namespace(1), tag)
else:
self._qname[0] = tag
else:
if self._qname is not None and self._qname.startswith('{'):
self._qname = '{%s}%s' % (self._get_namespace(), tag)
else:
self._qname = tag
def _set_namespace(self, namespace):
tag = self._get_tag(1)
if tag is None:
tag = ''
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if namespace:
self._qname[0] = '{%s}%s' % (namespace, tag)
else:
self._qname[0] = tag
else:
if namespace:
self._qname = '{%s}%s' % (namespace, tag)
else:
self._qname = tag
tag = property(_get_tag, _set_tag,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
namespace = property(_get_namespace, _set_namespace,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
# Provided for backwards compatibility to atom.ExtensionElement
children = extension_elements
attributes = extension_attributes
def _get_qname(element, version):
if isinstance(element._qname, tuple):
if version <= len(element._qname):
return element._qname[version-1]
else:
return element._qname[-1]
else:
return element._qname
def _qname_matches(tag, namespace, qname):
"""Logic determines if a QName matches the desired local tag and namespace.
This is used in XmlElement.get_elements and XmlElement.get_attributes to
find matches in the element's members (among all expected-and-unexpected
elements-and-attributes).
Args:
    tag: string
    namespace: string
qname: string in the form '{xml_namespace}localtag' or 'tag' if there is
no namespace.
Returns:
boolean True if the member's tag and namespace fit the expected tag and
namespace.
"""
# If there is no expected namespace or tag, then everything will match.
if qname is None:
member_tag = None
member_namespace = None
else:
if qname.startswith('{'):
member_namespace = qname[1:qname.index('}')]
member_tag = qname[qname.index('}') + 1:]
else:
member_namespace = None
member_tag = qname
return ((tag is None and namespace is None)
# If there is a tag, but no namespace, see if the local tag matches.
or (namespace is None and member_tag == tag)
# There was no tag, but there was a namespace so see if the namespaces
# match.
or (tag is None and member_namespace == namespace)
# There was no tag, and the desired elements have no namespace, so check
# to see that the member's namespace is None.
or (tag is None and namespace == ''
and member_namespace is None)
# The tag and the namespace both match.
or (tag == member_tag
and namespace == member_namespace)
# The tag matches, and the expected namespace is the empty namespace,
# check to make sure the member's namespace is None.
or (tag == member_tag and namespace == ''
and member_namespace is None))
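# A few illustrative calls (assuming the rules above):
#   _qname_matches('name', 'http://www.w3.org/2005/Atom',
#                  '{http://www.w3.org/2005/Atom}name')          -> True
#   _qname_matches('name', None, '{http://example.com/ns}name')  -> True
#   _qname_matches(None, '', 'name')                             -> True
#   _qname_matches('name', '', '{http://example.com/ns}name')    -> False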
def parse(xml_string, target_class=None, version=1, encoding=None):
"""Parses the XML string according to the rules for the target_class.
Args:
xml_string: str or unicode
target_class: XmlElement or a subclass. If None is specified, the
XmlElement class is used.
version: int (optional) The version of the schema which should be used when
converting the XML into an object. The default is 1.
encoding: str (optional) The character encoding of the bytes in the
xml_string. Default is 'UTF-8'.
"""
if target_class is None:
target_class = XmlElement
if isinstance(xml_string, unicode):
if encoding is None:
xml_string = xml_string.encode(STRING_ENCODING)
else:
xml_string = xml_string.encode(encoding)
tree = ElementTree.fromstring(xml_string)
return _xml_element_from_tree(tree, target_class, version)
Parse = parse
xml_element_from_string = parse
XmlElementFromString = xml_element_from_string
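def _example_parse_usage():
  """Illustrative sketch only: hypothetical element classes showing how the
  parsing rules above are typically wired up. Not used by this module."""
  class Name(XmlElement):
    _qname = '{http://www.w3.org/2005/Atom}name'
  class Author(XmlElement):
    _qname = '{http://www.w3.org/2005/Atom}author'
    name = Name
  author = parse(
      '<author xmlns="http://www.w3.org/2005/Atom"><name>Jo</name></author>',
      Author)
  # author.name is a Name instance and author.name.text == 'Jo'.
  return author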
def _xml_element_from_tree(tree, target_class, version=1):
if target_class._qname is None:
instance = target_class()
instance._qname = tree.tag
instance._harvest_tree(tree, version)
return instance
# TODO handle the namespace-only case
# Namespace only will be used with Google Spreadsheets rows and
# Google Base item attributes.
elif tree.tag == _get_qname(target_class, version):
instance = target_class()
instance._harvest_tree(tree, version)
return instance
return None
class XmlAttribute(object):
def __init__(self, qname, value):
self._qname = qname
self.value = value
| apache-2.0 |
tkem/mopidy-local-sqlite | mopidy_local_sqlite/library.py | 2 | 9430 | from __future__ import unicode_literals
import hashlib
import logging
import operator
import os
import os.path
import sqlite3
import sys
from mopidy import local
from mopidy.exceptions import ExtensionError
from mopidy.local import translator
from mopidy.models import Ref, SearchResult
import uritools
from . import Extension, schema
logger = logging.getLogger(__name__)
class SQLiteLibrary(local.Library):
name = 'sqlite'
def __init__(self, config):
self._config = ext_config = config[Extension.ext_name]
self._data_dir = Extension.get_or_create_data_dir(config)
try:
self._media_dir = config['local']['media_dir']
except KeyError:
raise ExtensionError('Mopidy-Local not enabled')
self._directories = []
for line in ext_config['directories']:
name, uri = line.rsplit(None, 1)
ref = Ref.directory(uri=uri, name=name)
self._directories.append(ref)
self._dbpath = os.path.join(self._data_dir, b'library.db')
self._connection = None
def load(self):
with self._connect() as connection:
version = schema.load(connection)
logger.debug('Using SQLite database schema v%s', version)
return schema.count_tracks(connection)
def lookup(self, uri):
if uri.startswith('local:album'):
return list(schema.lookup(self._connect(), Ref.ALBUM, uri))
elif uri.startswith('local:artist'):
return list(schema.lookup(self._connect(), Ref.ARTIST, uri))
elif uri.startswith('local:track'):
return list(schema.lookup(self._connect(), Ref.TRACK, uri))
else:
logger.error('Invalid lookup URI %s', uri)
return []
def browse(self, uri):
try:
if uri == self.ROOT_DIRECTORY_URI:
return self._directories
elif uri.startswith('local:directory'):
return self._browse_directory(uri)
elif uri.startswith('local:artist'):
return self._browse_artist(uri)
elif uri.startswith('local:album'):
return self._browse_album(uri)
else:
raise ValueError('Invalid browse URI')
except Exception as e:
logger.error('Error browsing %s: %s', uri, e)
return []
def search(self, query=None, limit=100, offset=0, uris=None, exact=False):
q = []
for field, values in (query.items() if query else []):
q.extend((field, value) for value in values)
filters = [f for uri in uris or [] for f in self._filters(uri) if f]
with self._connect() as c:
tracks = schema.search_tracks(c, q, limit, offset, exact, filters)
uri = uritools.uricompose('local', path='search', query=q)
return SearchResult(uri=uri, tracks=tracks)
def get_distinct(self, field, query=None):
q = []
for key, values in (query.items() if query else []):
q.extend((key, value) for value in values)
return set(schema.list_distinct(self._connect(), field, q))
def begin(self):
return schema.tracks(self._connect())
def add(self, track):
try:
track = self._validate_track(track)
schema.insert_track(self._connect(), track)
except Exception as e:
logger.warn('Skipped %s: %s', track.uri, e)
def remove(self, uri):
schema.delete_track(self._connect(), uri)
def flush(self):
if not self._connection:
return False
self._connection.commit()
return True
def close(self):
schema.cleanup(self._connection)
self._connection.commit()
self._connection.close()
self._connection = None
def clear(self):
try:
schema.clear(self._connect())
return True
except sqlite3.Error as e:
logger.error('Error clearing SQLite database: %s', e)
return False
def _connect(self):
if not self._connection:
self._connection = sqlite3.connect(
self._dbpath,
factory=schema.Connection,
timeout=self._config['timeout'],
check_same_thread=False,
)
return self._connection
def _browse_album(self, uri, order=('disc_no', 'track_no', 'name')):
return schema.browse(self._connect(), Ref.TRACK, order, album=uri)
def _browse_artist(self, uri, order=('type', 'name COLLATE NOCASE')):
with self._connect() as c:
albums = schema.browse(c, Ref.ALBUM, order, albumartist=uri)
refs = schema.browse(c, order=order, artist=uri)
album_uris, tracks = {ref.uri for ref in albums}, []
for ref in refs:
if ref.type == Ref.ALBUM and ref.uri not in album_uris:
albums.append(Ref.directory(
uri=uritools.uricompose('local', None, 'directory', dict(
type=Ref.TRACK, album=ref.uri, artist=uri
)),
name=ref.name
))
elif ref.type == Ref.TRACK:
tracks.append(ref)
else:
logger.debug('Skipped SQLite browse result %s', ref.uri)
albums.sort(key=operator.attrgetter('name'))
return albums + tracks
def _browse_directory(self, uri, order=('type', 'name COLLATE NOCASE')):
query = dict(uritools.urisplit(uri).getquerylist())
type = query.pop('type', None)
role = query.pop('role', None)
# TODO: handle these in schema (generically)?
if type == 'date':
format = query.get('format', '%Y-%m-%d')
return map(_dateref, schema.dates(self._connect(), format=format))
if type == 'genre':
return map(_genreref, schema.list_distinct(self._connect(), 'genre')) # noqa
# Fix #38: keep sort order of album tracks; this also applies
# to composers and performers
if type == Ref.TRACK and 'album' in query:
order = ('disc_no', 'track_no', 'name')
if type == Ref.ARTIST and self._config['use_artist_sortname']:
order = ('coalesce(sortname, name) COLLATE NOCASE',)
roles = role or ('artist', 'albumartist') # FIXME: re-think 'roles'...
refs = []
for ref in schema.browse(self._connect(), type, order, role=roles, **query): # noqa
if ref.type == Ref.TRACK or (not query and not role):
refs.append(ref)
elif ref.type == Ref.ALBUM:
refs.append(Ref.directory(uri=uritools.uricompose(
'local', None, 'directory', dict(query, type=Ref.TRACK, album=ref.uri) # noqa
), name=ref.name))
elif ref.type == Ref.ARTIST:
refs.append(Ref.directory(uri=uritools.uricompose(
'local', None, 'directory', dict(query, **{role: ref.uri})
), name=ref.name))
else:
logger.warn('Unexpected SQLite browse result: %r', ref)
return refs
def _validate_artist(self, artist):
if not artist.name:
raise ValueError('Empty artist name')
uri = artist.uri or self._model_uri('artist', artist)
return artist.copy(uri=uri)
def _validate_album(self, album):
if not album.name:
raise ValueError('Empty album name')
uri = album.uri or self._model_uri('album', album)
artists = map(self._validate_artist, album.artists)
return album.copy(uri=uri, artists=artists)
def _validate_track(self, track, encoding=sys.getfilesystemencoding()):
if not track.uri:
raise ValueError('Empty track URI')
if track.name:
name = track.name
else:
path = translator.local_track_uri_to_path(track.uri, b'')
name = os.path.basename(path).decode(encoding, errors='replace')
if track.album and track.album.name:
album = self._validate_album(track.album)
else:
album = None
return track.copy(
name=name,
album=album,
artists=map(self._validate_artist, track.artists),
composers=map(self._validate_artist, track.composers),
performers=map(self._validate_artist, track.performers)
)
def _filters(self, uri):
if uri.startswith('local:directory'):
return [dict(uritools.urisplit(uri).getquerylist())]
elif uri.startswith('local:artist'):
return [{'artist': uri}, {'albumartist': uri}]
elif uri.startswith('local:album'):
return [{'album': uri}]
else:
return []
def _model_uri(self, type, model):
if model.musicbrainz_id and self._config['use_%s_mbid_uri' % type]:
return 'local:%s:mbid:%s' % (type, model.musicbrainz_id)
digest = hashlib.md5(str(model)).hexdigest()
return 'local:%s:md5:%s' % (type, digest)
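        # Illustrative results only: with 'use_artist_mbid_uri' enabled an
        # artist carrying a MusicBrainz id maps to 'local:artist:mbid:<uuid>',
        # otherwise to 'local:artist:md5:<hex digest of str(model)>'.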
def _dateref(date):
return Ref.directory(
uri=uritools.uricompose('local', None, 'directory', {'date': date}),
name=date
)
def _genreref(genre):
return Ref.directory(
uri=uritools.uricompose('local', None, 'directory', {'genre': genre}),
name=genre
)
| apache-2.0 |
ynkjm/ryu | ryu/contrib/ncclient/transport/errors.py | 77 | 1293 | # Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ncclient import NCClientError
class TransportError(NCClientError):
pass
class AuthenticationError(TransportError):
pass
class SessionCloseError(TransportError):
def __init__(self, in_buf, out_buf=None):
msg = 'Unexpected session close'
if in_buf:
msg += '\nIN_BUFFER: `%s`' % in_buf
if out_buf:
msg += ' OUT_BUFFER: `%s`' % out_buf
SSHError.__init__(self, msg)
class SSHError(TransportError):
pass
class SSHUnknownHostError(SSHError):
def __init__(self, host, fingerprint):
SSHError.__init__(self, 'Unknown host key [%s] for [%s]' % (fingerprint, host))
self.host = host
self.fingerprint = fingerprint
| apache-2.0 |
amerlyq/piony | piony/config/argparser.py | 1 | 2747 | from argparse import ArgumentParser, RawDescriptionHelpFormatter
import piony
from piony.common.exceptions import InputError
class ArgParser(object):
def __init__(self):
self.ps = ArgumentParser(prog=piony.__appname__,
formatter_class=RawDescriptionHelpFormatter,
description=piony.__doc__, epilog="Enjoy!!!")
self._setup_options()
def parse(self, argv):
if not argv:
argv = []
elif isinstance(argv, str):
argv = argv.split()
elif not isinstance(argv, list):
raise InputError("Wrong argv type: {}".format(type(argv)))
return self.ps.parse_args(argv)
def apply(self, args):
from operator import xor
res = (False, False)
dbg = {'a': (True, True), 'v': (True, False), 'k': (False, True)}
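        # 'a' toggles both flags, 'v' only the visuals flag, 'k' only the
        # actions ("keys") flag; xor-ing lets repeated entries cancel out.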
if args.verbose:
for entry in args.verbose:
res = map(xor, res, dbg[entry])
piony.G_DEBUG_VISUALS, piony.G_DEBUG_ACTIONS = res
def _setup_options(self):
## Configuration
farg = self.ps.add_argument
farg('buds', metavar='bud', nargs='*', type=str, default=None,
help="Setup profile layout in json directly on cmdline. "
"Can be specified several times -- one for each slice. "
"Or use pathes to files with slices inside.")
farg('-v', '--version', action='version', default=None,
version="%(prog)s {0}".format(piony.__version__),
help="Version of program.")
gr_window = self.ps.add_argument_group('Window')
warg = gr_window.add_argument
warg('-c', '--config', default=None,
help="Config file with default settings.")
warg('-p', '--print', default=None,
help="Toggle action print/execute to use as frontend only.")
## Appearance
warg('-s', '--size', type=int, default=None,
help="Sets window size WxH=NxN to derive all rings sizes from it.")
warg('-F', '--fullscreen', action='store_true', default=None,
help="Overlay fullscreen/local")
warg('-T', '--no-tooltip', action='store_true', default=None,
help="Disable pop-up items, for those who is irritated.")
## Process
gr_general = self.ps.add_argument_group('General')
garg = gr_general.add_argument
garg('-k', '--kill', action='store_true', default=None,
help="Kill running daemonized program.")
garg('-V', '--verbose', nargs='?', type=str,
const='a', choices=['a', 'v', 'k'], default=None,
help="Verbose (debug): [a]ll (default), [v]isuals, [k]eys.")
| gpl-3.0 |
gautam1858/tensorflow | tensorflow/python/training/adagrad_test.py | 22 | 15078 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class AdagradOptimizerTest(test.TestCase):
def doTestBasic(self,
use_locking=False,
use_resource=False,
use_callable_params=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 3.0
if not use_callable_params:
learning_rate = learning_rate()
ada_opt = adagrad.AdagradOptimizer(
learning_rate, initial_accumulator_value=0.1, use_locking=use_locking)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Validate updated params
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), v0_val)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), v1_val)
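  def _adagrad_reference(self, var, grad, lr=3.0, accum=0.1, steps=3):
    # Plain-NumPy sketch of the dense Adagrad update the hard-coded constants
    # above come from (assumed form): accum += g**2; var -= lr * g / sqrt(accum).
    # Not used by the tests; kept only as a reference for the expected values.
    var = np.array(var, dtype=np.float64)
    grad = np.array(grad, dtype=np.float64)
    accum = np.full_like(var, accum)
    for _ in range(steps):
      accum += grad * grad
      var -= lr * grad / np.sqrt(accum)
    return var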
def testBasic(self):
self.doTestBasic(use_locking=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testBasicResource(self):
self.doTestBasic(use_locking=False, use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(
use_locking=False, use_resource=True, use_callable_params=True)
def testBasicLocked(self):
self.doTestBasic(use_locking=True)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable(
[[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0], [3.0, 4.0]],
self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[0, 1], [3, 4]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([[1.0], [2.0]], self.evaluate(var0))
self.assertAllClose([[3.0], [4.0]], self.evaluate(var1))
# Run 3 step of sgd
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([[-1.6026098728179932], [2.0]]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([[3.0], [3.715679168701172]]), self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
@test_util.run_deprecated_v1
def testSparseRepeatedIndicesResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_repeated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_repeated = math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_repeated, [0, 0]))
var_aggregated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_aggregated = 2 * math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_aggregated, [0]))
update_op_repeated = adagrad.AdagradOptimizer(
2.0).minimize(loss_repeated)
update_op_aggregated = adagrad.AdagradOptimizer(
2.0).minimize(loss_aggregated)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(
self.evaluate(var_repeated), self.evaluate(var_aggregated))
for _ in range(3):
update_op_repeated.run()
update_op_aggregated.run()
self.assertAllCloseAccordingToType(
self.evaluate(var_repeated), self.evaluate(var_aggregated))
@test_util.run_deprecated_v1
def testSparseStability(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
shape = [1, 6]
var0 = variables.Variable(
[[
0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
-0.0105945
]],
dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[[
-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
-8.4877e-05, -9.48906e-05
]],
shape=shape,
dtype=dtype),
constant_op.constant([0]),
constant_op.constant(shape))
ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
init = variables.global_variables_initializer()
for _ in range(100):
init.run()
ada_update.run()
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([[
0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
-0.01029443
]]), self.evaluate(var0))
@test_util.run_deprecated_v1
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEquals(slot1.get_shape(), var1.get_shape())
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1))
@test_util.run_v1_only("b/120545219")
def testDynamicShapeVariable_Ok(self):
with self.cached_session():
v = variable_scope.get_variable("v", initializer=constant_op.constant(1.),
validate_shape=False)
self.assertFalse(v.shape.is_fully_defined())
# Creating optimizer should cause no exception.
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
@test_util.run_v1_only("b/120545219")
def testDynamicShapeVariableWithCallableInit(self):
var0 = variable_scope.get_variable("var0",
initializer=constant_op.constant(1.),
validate_shape=False)
self.assertFalse(var0.shape.is_fully_defined())
grads0 = constant_op.constant(0.1, dtype=dtypes.float32)
learning_rate = lambda: 3.0
ada_opt = adagrad.AdagradOptimizer(
learning_rate, initial_accumulator_value=0.1, use_locking=True)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0], [var0]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val = self.evaluate([var0])
self.assertAllClose([1.0], v0_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0], [var0]))
# Validate updated params
v0_val = self.evaluate([var0])
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932]), v0_val)
if __name__ == "__main__":
test.main()
| apache-2.0 |
Stan1989/volatility | volatility/plugins/gui/vtypes/xp.py | 50 | 16283 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (C) 2010,2011,2012 Michael Hale Ligh <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.obj as obj
import volatility.plugins.gui.constants as consts
class XP2003x86BaseVTypes(obj.ProfileModification):
"""Applies to everything x86 before Windows 7"""
def check(self, profile):
m = profile.metadata
version = (m.get('major', 0), m.get('minor', 0))
return (m.get('os', None) == 'windows' and
version < (6, 1) and
m.get('memory_model', '32bit') == '32bit')
def modification(self, profile):
profile.vtypes.update({
'tagWINDOWSTATION' : [ 0x5C, {
'dwSessionId' : [ 0x0, ['unsigned long']],
'rpwinstaNext' : [ 0x4, ['pointer', ['tagWINDOWSTATION']]],
'rpdeskList' : [ 0x8, ['pointer', ['tagDESKTOP']]],
'dwWSF_Flags' : [ 0x10, ['unsigned long']],
'ptiDrawingClipboard' : [ 0x1C, ['pointer', ['tagTHREADINFO']]],
'spwndClipOpen' : [ 0x20, ['pointer', ['tagWND']]],
'spwndClipViewer' : [ 0x24, ['pointer', ['tagWND']]],
'spwndClipOwner' : [ 0x28, ['pointer', ['tagWND']]],
'pClipBase' : [ 0x2C, ['pointer', ['array', lambda x : x.cNumClipFormats, ['tagCLIP']]]],
'cNumClipFormats' : [ 0x30, ['unsigned int']],
'iClipSerialNumber' : [ 0x34, ['unsigned int']],
'iClipSequenceNumber' : [ 0x38, ['unsigned int']],
#'spwndClipboardListener' : [ 0x3C, ['pointer', ['tagWND']]],
'pGlobalAtomTable' : [ 0x40, ['pointer', ['void']]],
}],
## This is defined in Windows 7
'tagCLIP' : [ 12, {
'fmt' : [ 0, ['Enumeration', dict(target = 'unsigned long', choices = consts.CLIPBOARD_FORMAT_ENUM)]],
'hData' : [ 4, ['unsigned int']],
'fGlobalHandle' : [ 8, ['unsigned int']],
}],
'tagDESKTOP' : [ 0x84, {
'dwSessionId' : [ 0x0, ['unsigned long']],
'pDeskInfo' : [ 0x4, ['pointer', ['tagDESKTOPINFO']]],
'rpdeskNext' : [ 0xc, ['pointer', ['tagDESKTOP']]],
'rpwinstaParent' : [ 0x10, ['pointer', ['tagWINDOWSTATION']]],
'hsectionDesktop' : [ 0x40, ['pointer', ['void']]],
'pheapDesktop' : [ 0x44, ['pointer', ['tagWIN32HEAP']]],
'PtiList' : [ 0x64, ['_LIST_ENTRY']],
}],
'tagTHREADINFO' : [ None, { # Same as Win32Thread
'pEThread' : [ 0x00, ['pointer', ['_ETHREAD']]],
'ppi' : [ 0x2C, ['pointer', ['tagPROCESSINFO']]],
'pq' : [ 0x30, ['pointer', ['tagQ']]],
'pDeskInfo' : [ 0x40, ['pointer', ['tagDESKTOPINFO']]],
'PtiLink' : [ 0xAC, ['_LIST_ENTRY']],
'fsHooks' : [ 0x98, ['unsigned long']],
'aphkStart' : [ 0xF4, ['array', 16, ['pointer', ['tagHOOK']]]],
}],
'tagQ' : [ None, {
'mlInput' : [ 0x00, ['tagMLIST']],
}],
'tagMLIST' : [ None, {
'pqmsgRead' : [ 0x00, ['pointer', ['tagQMSG']]],
'cMsgs' : [ 0x08, ['unsigned long']],
}],
'tagQMSG' : [ None, {
'pqmsgNext' : [ 0x00, ['pointer', ['tagQMSG']]],
'pqmsgPrev' : [ 0x04, ['pointer', ['tagQMSG']]],
'msg' : [ 0x08, ['tagMSG']],
}],
'tagMSG' : [ None, {
'hwnd' : [ 0x00, ['unsigned long']],
'message' : [ 0x04, ['unsigned long']],
'wParam' : [ 0x08, ['unsigned long']],
'lParam' : [ 0x0C, ['unsigned long']],
'time' : [ 0x10, ['unsigned long']],
'pt' : [ 0x14, ['tagPOINT']],
}],
'tagPOINT' : [ None, {
'x' : [ 0x00, ['long']],
'y' : [ 0x04, ['long']],
}],
'tagHOOK' : [ None, {
'head' : [ 0x0, ['_THRDESKHEAD']],
'phkNext' : [ 0x14, ['pointer', ['tagHOOK']]],
'iHook' : [ 0x18, ['long']],
'offPfn' : [ 0x1c, ['unsigned long']],
'flags': [ 0x20, ['Flags', {'bitmap': consts.HOOK_FLAGS}]],
'ihmod' : [ 0x24, ['long']],
'ptiHooked' : [ 0x28, ['pointer', ['tagTHREADINFO']]],
'rpdesk' : [ 0x2c, ['pointer', ['tagDESKTOP']]],
}],
'tagDESKTOPINFO' : [ None, {
'pvDesktopBase' : [ 0x0, ['pointer', ['void']]],
'pvDesktopLimit' : [ 0x4, ['pointer', ['void']]],
'spwnd' : [ 0x08, ['pointer', ['tagWND']]],
'fsHooks' : [ 0x0c, ['unsigned long']],
'aphkStart' : [ 0x10, ['array', 16, ['pointer', ['tagHOOK']]]],
}],
'tagSERVERINFO' : [ 0xffc, {
'cHandleEntries' : [ 8, ['unsigned long']],
'cbHandleTable' : [ 0x1bc, ['unsigned long']],
}],
'tagSHAREDINFO' : [ 0x11c, { # From Win7SP0x86
'psi' : [ 0x0, ['pointer', ['tagSERVERINFO']]],
'aheList' : [ 0x4, ['pointer', ['_HANDLEENTRY']]],
'ulSharedDelta' : [ 0xC, ['unsigned long']],
}],
'_HANDLEENTRY' : [ 0xc, { # From Win7SP0x86
'phead' : [ 0x0, ['pointer', ['_HEAD']]],
'pOwner' : [ 0x4, ['pointer', ['void']]],
'bType': [ 8, ['Enumeration', dict(target = 'unsigned char', choices = consts.HANDLE_TYPE_ENUM)]],
'bFlags' : [ 0x9, ['unsigned char']],
'wUniq' : [ 0xa, ['unsigned short']],
}],
'_HEAD' : [ 0x8, { # From Win7SP0x86
'h' : [ 0x0, ['pointer', ['void']]],
'cLockObj' : [ 0x4, ['unsigned long']],
}],
'tagPROCESSINFO' : [ None, {
'Process' : [ 0x0, ['pointer', ['_EPROCESS']]],
}],
'_THRDESKHEAD' : [ 0x14, {
'h' : [ 0x0, ['pointer', ['void']]],
'cLockObj' : [ 0x4, ['unsigned long']],
'pti' : [ 0x8, ['pointer', ['tagTHREADINFO']]],
'rpdesk' : [ 0xc, ['pointer', ['tagDESKTOP']]],
'pSelf' : [ 0x10, ['pointer', ['unsigned char']]],
}],
'tagCLS' : [ 0x5c, {
'pclsNext' : [ 0x0, ['pointer', ['tagCLS']]],
'atomClassName' : [ 0x4, ['unsigned short']],
'atomNVClassName' : [ 0x6, ['unsigned short']],
}],
'tagRECT' : [ 0x10, {
'left' : [ 0x0, ['long']],
'top' : [ 0x4, ['long']],
'right' : [ 0x8, ['long']],
'bottom' : [ 0xc, ['long']],
}],
'tagWND' : [ 0x90, {
'head' : [ 0x0, ['_THRDESKHEAD']],
'ExStyle' : [ 0x1c, ['unsigned long']],
'style' : [ 0x20, ['unsigned long']],
'hModule' : [ 0x24, ['pointer', ['void']]],
'spwndNext' : [ 0x2c, ['pointer', ['tagWND']]],
'spwndPrev' : [ 0x30, ['pointer', ['tagWND']]],
'spwndParent' : [ 0x34, ['pointer', ['tagWND']]],
'spwndChild' : [ 0x38, ['pointer', ['tagWND']]],
'spwndOwner' : [ 0x3c, ['pointer', ['tagWND']]],
'rcWindow' : [ 0x40, ['tagRECT']],
'rcClient' : [ 0x50, ['tagRECT']],
'lpfnWndProc' : [ 0x60, ['pointer', ['void']]],
'pcls' : [ 0x64, ['pointer', ['tagCLS']]],
'strName' : [ 0x80, ['_LARGE_UNICODE_STRING']],
'cbwndExtra' : [ 0x8C, ['long']],
'dwUserData' : [ 0x98, ['unsigned long']],
}],
'_LARGE_UNICODE_STRING' : [ 0xc, {
'Length' : [ 0x0, ['unsigned long']],
'MaximumLength' : [ 0x4, ['BitField', dict(start_bit = 0, end_bit = 31)]],
'bAnsi' : [ 0x4, ['BitField', dict(start_bit = 31, end_bit = 32)]],
'Buffer' : [ 0x8, ['pointer', ['unsigned short']]],
}],
})
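# Illustrative note (not part of Volatility itself): once check() above matches
# the loaded profile, these vtypes are merged into it and can be instantiated
# like any other structure. Assuming `kernel_space` is a kernel address space
# and `offset` points at a window object, a plugin could do something like:
#
#     wnd = obj.Object("tagWND", offset = offset, vm = kernel_space)
#     print wnd.strName.Length, wnd.rcWindow.left, wnd.rcWindow.top
#
# The member names follow the definitions in the dictionary above.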
class XP2003x64BaseVTypes(obj.ProfileModification):
"""Applies to Windows XP and 2003 x64"""
conditions = {'os': lambda x: x == 'windows',
'memory_model': lambda x: x == '64bit',
'major': lambda x: x < 6}
def modification(self, profile):
profile.vtypes.update({
'tagWINDOWSTATION' : [ 0x90, { # !poolfind Wind is 100h
'dwSessionId' : [ 0x0, ['unsigned long']],
'rpwinstaNext' : [ 0x8, ['pointer64', ['tagWINDOWSTATION']]], # FreeWindowStation
'rpdeskList' : [ 0x10, ['pointer64', ['tagDESKTOP']]],
'dwWSF_Flags' : [ 0x20, ['unsigned long']], # FreeWindowStation
'ptiDrawingClipboard' : [ 0x38, ['pointer64', ['tagTHREADINFO']]], # xxxDrawClipboard
'spwndClipOpen' : [ 0x40, ['pointer64', ['tagWND']]],
'spwndClipViewer' : [ 0x48, ['pointer64', ['tagWND']]],
'spwndClipOwner' : [ 0x50, ['pointer64', ['tagWND']]],
'pClipBase' : [ 0x58, ['pointer64', ['array', lambda x : x.cNumClipFormats, ['tagCLIP']]]], # InternalSetClipboardData
'cNumClipFormats' : [ 0x60, ['unsigned int']], # InternalSetClipboardData
'iClipSerialNumber' : [ 0x64, ['unsigned int']], # InternalSetClipboardData
'iClipSequenceNumber' : [ 0x68, ['unsigned int']], # InternalSetClipboardData
'pGlobalAtomTable' : [ 0x70, ['pointer64', ['void']]],
}],
# From Windows 7
'tagCLIP' : [ 0x18, {
'fmt' : [ 0x0, ['Enumeration', dict(target = 'unsigned long', choices = consts.CLIPBOARD_FORMAT_ENUM)]],
'hData' : [ 0x8, ['pointer64', ['void']]],
'fGlobalHandle' : [ 0x10, ['long']],
}],
'tagDESKTOP' : [ 0xd0, { # !poolfind Desk is 140h
'dwSessionId' : [ 0x0, ['unsigned long']],
'pDeskInfo' : [ 0x8, ['pointer64', ['tagDESKTOPINFO']]], # xxxCreateDesktop
'rpdeskNext' : [ 0x18, ['pointer64', ['tagDESKTOP']]], # ParseDesktop
'rpwinstaParent' : [ 0x20, ['pointer64', ['tagWINDOWSTATION']]],
'hsectionDesktop' : [ 0x70, ['pointer64', ['void']]], # MapDesktop
'pheapDesktop' : [ 0x78, ['pointer64', ['tagWIN32HEAP']]], # DesktopAlloc
'PtiList' : [ 0xa0, ['_LIST_ENTRY']], # zzzJournalAttach
}],
'tagTHREADINFO' : [ None, {
'pEThread' : [ 0x00, ['pointer', ['_ETHREAD']]],
'ppi' : [ 0x68, ['pointer64', ['tagPROCESSINFO']]], # xxxSetThreadDesktop
#'pq' : [ 0x30, ['pointer', ['tagQ']]],
'pDeskInfo' : [ 0x90, ['pointer64', ['tagDESKTOPINFO']]], # xxxDesktopThread
'PtiLink' : [ 0x160, ['_LIST_ENTRY']],
'fsHooks' : [ 0x138, ['unsigned long']], # xxxSetThreadDesktop, CheckWHFBits
'aphkStart' : [ 0x140, ['array', 16, ['pointer64', ['tagHOOK']]]],
}],
'tagDESKTOPINFO' : [ None, {
'pvDesktopBase' : [ 0x0, ['pointer64', ['void']]],
'pvDesktopLimit' : [ 0x8, ['pointer64', ['void']]],
'spwnd' : [ 0x10, ['pointer64', ['tagWND']]],
'fsHooks' : [ 0x18, ['unsigned long']], # CheckWHFBits
'aphkStart' : [ 0x20, ['array', 16, ['pointer64', ['tagHOOK']]]],
}],
'tagWND' : [ None, {
'head' : [ 0x0, ['_THRDESKHEAD']],
'ExStyle' : [ 0x30, ['unsigned long']], # xxxCreateWindowEx
'style' : [ 0x34, ['unsigned long']], # xxxCreateWindowEx
'spwndNext' : [ 0x48, ['pointer64', ['tagWND']]],
'spwndPrev' : [ 0x50, ['pointer64', ['tagWND']]],
'spwndParent' : [ 0x58, ['pointer64', ['tagWND']]],
'spwndChild' : [ 0x60, ['pointer64', ['tagWND']]],
'spwndOwner' : [ 0x68, ['pointer64', ['tagWND']]],
'rcWindow' : [ 0x70, ['tagRECT']],
'rcClient' : [ 0x80, ['tagRECT']],
'lpfnWndProc' : [ 0x90, ['pointer64', ['void']]],
'pcls' : [ 0x98, ['pointer64', ['tagCLS']]], # HMChangeOwnerThread
'strName' : [ 0xd0, ['_LARGE_UNICODE_STRING']],
}],
'tagRECT' : [ 0x10, {
'left' : [ 0x0, ['long']],
'top' : [ 0x4, ['long']],
'right' : [ 0x8, ['long']],
'bottom' : [ 0xc, ['long']],
}],
'tagCLS' : [ None, {
'pclsNext' : [ 0x0, ['pointer64', ['tagCLS']]],
'atomClassName' : [ 0x8, ['unsigned short']], # HMChangeOwnerThread
'atomNVClassName' : [ 0xA, ['unsigned short']],
}],
# From Win7 x64
'_LARGE_UNICODE_STRING' : [ 0x10, {
'Length' : [ 0x0, ['unsigned long']],
'MaximumLength' : [ 0x4, ['BitField', dict(start_bit = 0, end_bit = 31, native_type = 'unsigned long')]],
'bAnsi' : [ 0x4, ['BitField', dict(start_bit = 31, end_bit = 32, native_type = 'unsigned long')]],
'Buffer' : [ 0x8, ['pointer64', ['unsigned short']]],
}],
# From Win7 x64
'_THRDESKHEAD' : [ 0x28, {
'h' : [ 0x0, ['pointer64', ['void']]],
'cLockObj' : [ 0x8, ['unsigned long']],
'pti' : [ 0x10, ['pointer64', ['tagTHREADINFO']]],
'rpdesk' : [ 0x18, ['pointer64', ['tagDESKTOP']]],
'pSelf' : [ 0x20, ['pointer64', ['unsigned char']]],
}],
# From Win7 x64
'tagSHAREDINFO' : [ None, {
'psi' : [ 0x0, ['pointer64', ['tagSERVERINFO']]],
'aheList' : [ 0x8, ['pointer64', ['_HANDLEENTRY']]],
#'HeEntrySize' : [ 0x10, ['unsigned long']],
#'pDispInfo' : [ 0x18, ['pointer64', ['tagDISPLAYINFO']]],
'ulSharedDelta' : [ 0x18, ['unsigned long long']],
#'awmControl' : [ 0x28, ['array', 31, ['_WNDMSG']]],
#'DefWindowMsgs' : [ 0x218, ['_WNDMSG']],
#'DefWindowSpecMsgs' : [ 0x228, ['_WNDMSG']],
}],
# From Win7 x64
'_HANDLEENTRY' : [ 0x18, {
'phead' : [ 0x0, ['pointer64', ['_HEAD']]],
'pOwner' : [ 0x8, ['pointer64', ['void']]],
'bType': [ 0x10, ['Enumeration', dict(target = 'unsigned char', choices = consts.HANDLE_TYPE_ENUM)]],
'bFlags' : [ 0x11, ['unsigned char']],
'wUniq' : [ 0x12, ['unsigned short']],
}],
# From Win7 x64
'_HEAD' : [ 0x10, {
'h' : [ 0x0, ['pointer64', ['void']]],
'cLockObj' : [ 0x8, ['unsigned long']],
}],
'tagSERVERINFO' : [ None, {
'cHandleEntries' : [ 8, ['unsigned long']],
'cbHandleTable' : [ 0x330, ['unsigned long']], # HMInitHandleTable
}],
'tagPROCESSINFO' : [ None, {
'Process' : [ 0x0, ['pointer', ['_EPROCESS']]],
}],
# From Win7 x64
'tagHOOK' : [ 0x60, {
'head' : [ 0x0, ['_THRDESKHEAD']],
'phkNext' : [ 0x28, ['pointer64', ['tagHOOK']]],
'iHook' : [ 0x30, ['long']],
'offPfn' : [ 0x38, ['unsigned long long']],
'flags': [ 0x40, ['Flags', {'bitmap': consts.HOOK_FLAGS}]],
'ihmod' : [ 0x44, ['long']],
'ptiHooked' : [ 0x48, ['pointer64', ['tagTHREADINFO']]],
'rpdesk' : [ 0x50, ['pointer64', ['tagDESKTOP']]],
'nTimeout' : [ 0x58, ['BitField', dict(start_bit = 0, end_bit = 7, native_type = 'unsigned long')]],
'fLastHookHung' : [ 0x58, ['BitField', dict(start_bit = 7, end_bit = 8, native_type = 'long')]],
}],
})
| gpl-2.0 |
nesterione/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
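# Illustrative usage (assumption, not part of the fixture): while the mock is
# installed by setup_module(), documentation examples can call e.g.
# datasets.fetch_mldata('mnist-original', data_home=custom_data_home) and get
# the fake arrays declared above instead of downloading from mldata.org.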
| bsd-3-clause |
strahlc/exaile | xlgui/main.py | 1 | 43837 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import datetime
import logging
import os
import re
import threading
import cairo
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Pango
from xl.nls import gettext as _
from xl import (
common,
covers,
event,
formatter,
player,
playlist,
providers,
settings,
trax,
xdg
)
from xlgui.accelerators import AcceleratorManager
from xlgui.playlist_container import PlaylistContainer
from xlgui.widgets import (
dialogs,
info,
menu,
playback
)
from xlgui.widgets.playlist import (
PlaylistPage,
PlaylistView
)
from xlgui import (
guiutil,
tray,
menu as mainmenu
)
logger = logging.getLogger(__name__)
# Length of playback step when user presses seek key (sec)
SEEK_STEP_DEFAULT = 10
# Length of volume steps when user presses up/down key
VOLUME_STEP_DEFAULT = 0.1
class MainWindow(GObject.GObject):
"""
Main Exaile Window
"""
__gproperties__ = {
'is-fullscreen': (bool, 'Fullscreen',
'Whether the window is fullscreen.',
False, # Default
GObject.PARAM_READWRITE),
}
__gsignals__ = {'main-visible-toggle': (GObject.SignalFlags.RUN_LAST, bool, ())}
_mainwindow = None
def __init__(self, controller, builder, collection):
"""
Initializes the main window
@param controller: the main gui controller
"""
GObject.GObject.__init__(self)
self.controller = controller
self.collection = collection
self.playlist_manager = controller.exaile.playlists
self.current_page = -1
self._fullscreen = False
self.resuming = False
self.window_state = 0
self.minimized = False
self.builder = builder
self.window = self.builder.get_object('ExaileWindow')
self.window.set_title('Exaile')
self.title_formatter = formatter.TrackFormatter(settings.get_option(
'gui/main_window_title_format', _('$title (by $artist)') +
' - Exaile'))
self.accelgroup = Gtk.AccelGroup()
self.window.add_accel_group(self.accelgroup)
self.accel_manager = AcceleratorManager('mainwindow-accelerators', self.accelgroup)
self.menubar = self.builder.get_object("mainmenu")
fileitem = self.builder.get_object("file_menu_item")
filemenu = menu.ProviderMenu('menubar-file-menu', self)
fileitem.set_submenu(filemenu)
edititem = self.builder.get_object("edit_menu_item")
editmenu = menu.ProviderMenu('menubar-edit-menu', self)
edititem.set_submenu(editmenu)
viewitem = self.builder.get_object("view_menu_item")
viewmenu = menu.ProviderMenu('menubar-view-menu', self)
viewitem.set_submenu(viewmenu)
toolsitem = self.builder.get_object("tools_menu_item")
toolsmenu = menu.ProviderMenu('menubar-tools-menu', self)
toolsitem.set_submenu(toolsmenu)
helpitem = self.builder.get_object("help_menu_item")
helpmenu = menu.ProviderMenu('menubar-help-menu', self)
helpitem.set_submenu(helpmenu)
self._setup_widgets()
self._setup_position()
self._setup_hotkeys()
logger.info("Connecting main window events...")
self._connect_events()
MainWindow._mainwindow = self
mainmenu._create_menus()
def _setup_hotkeys(self):
"""
Sets up accelerators that haven't been set up in UI designer
"""
hotkeys = (
('<Control>S', lambda *e: self.on_save_playlist()),
('<Shift><Control>S', lambda *e: self.on_save_playlist_as()),
('<Control>F', lambda *e: self.on_panel_filter_focus()),
('<Control>G', lambda *e: self.on_search_playlist_focus()), # FIXME
('<Control><Alt>l', lambda *e: player.QUEUE.clear()), # FIXME
('<Control>P', self._on_playpause_button),
('<Control>Right', lambda *e: self._on_seek_key(True)),
('<Control>Left', lambda *e: self._on_seek_key(False)),
('<Control>plus', lambda *e: self._on_volume_key(True)),
('<Control>minus', lambda *e: self._on_volume_key(False)),
('<Control>Page_Up', self._on_prev_tab_key),
('<Control>Page_Down', self._on_next_tab_key),
('<Alt>N', self._on_focus_playlist_container),
# These 4 are subject to change.. probably should do this
# via a different mechanism too...
('<Alt>I', lambda *e: self.controller.focus_panel('files')),
#('<Alt>C', lambda *e: self.controller.focus_panel('collection')),
('<Alt>R', lambda *e: self.controller.focus_panel('radio')),
('<Alt>L', lambda *e: self.controller.focus_panel('playlists')),
('<Alt>1', lambda *e: self._on_focus_playlist_tab(0)),
('<Alt>2', lambda *e: self._on_focus_playlist_tab(1)),
('<Alt>3', lambda *e: self._on_focus_playlist_tab(2)),
('<Alt>4', lambda *e: self._on_focus_playlist_tab(3)),
('<Alt>5', lambda *e: self._on_focus_playlist_tab(4)),
('<Alt>6', lambda *e: self._on_focus_playlist_tab(5)),
('<Alt>7', lambda *e: self._on_focus_playlist_tab(6)),
('<Alt>8', lambda *e: self._on_focus_playlist_tab(7)),
('<Alt>9', lambda *e: self._on_focus_playlist_tab(8)),
('<Alt>0', lambda *e: self._on_focus_playlist_tab(9)),
)
self.accel_group = Gtk.AccelGroup()
for key, function in hotkeys:
key, mod = Gtk.accelerator_parse(key)
self.accel_group.connect(key, mod, Gtk.AccelFlags.VISIBLE,
function)
self.window.add_accel_group(self.accel_group)
def _setup_widgets(self):
"""
Sets up the various widgets
"""
# TODO: Maybe make this stackable
self.message = dialogs.MessageBar(
parent=self.builder.get_object('player_box'),
buttons=Gtk.ButtonsType.CLOSE
)
self.message.connect('response', self.on_messagebar_response)
self.info_area = MainWindowTrackInfoPane(player.PLAYER)
self.info_area.set_auto_update(True)
self.info_area.set_padding(3, 3, 3, 3)
self.info_area.hide()
self.info_area.set_no_show_all(True)
guiutil.gtk_widget_replace(self.builder.get_object('info_area'), self.info_area)
self.volume_control = playback.VolumeControl(player.PLAYER)
self.info_area.get_action_area().pack_end(self.volume_control, False, False, 0)
self.alpha_style = None
if settings.get_option('gui/use_alpha', False):
screen = self.window.get_screen()
visual = screen.get_rgba_visual()
self.window.set_visual(visual)
self.window.connect('screen-changed', self.on_screen_changed)
self.alpha_style = Gtk.CssProvider.new()
self.window.get_style_context().add_provider(self.alpha_style,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
self._update_alpha()
playlist_area = self.builder.get_object('playlist_area')
self.playlist_container = PlaylistContainer('saved_tabs', player.PLAYER)
for notebook in self.playlist_container.notebooks:
notebook.connect_after('switch-page', self.on_playlist_container_switch_page)
page = notebook.get_current_tab()
if page is not None:
selection = page.view.get_selection()
selection.connect('changed', self.on_playlist_view_selection_changed)
playlist_area.pack_start(self.playlist_container, True, True, 3)
self.splitter = self.builder.get_object('splitter')
# In most (all?) RTL locales, the playback controls should still be LTR.
# Just in case that's not always the case, we provide a hidden option to
# force RTL layout instead. This can be removed once we're more certain
# that the default behavior (always LTR) is correct.
controls_direction = Gtk.TextDirection.RTL \
if settings.get_option('gui/rtl_playback_controls') \
else Gtk.TextDirection.LTR
self.play_image = Gtk.Image.new_from_icon_name('media-playback-start',
Gtk.IconSize.SMALL_TOOLBAR)
self.play_image.set_direction(controls_direction)
self.pause_image = Gtk.Image.new_from_icon_name('media-playback-pause',
Gtk.IconSize.SMALL_TOOLBAR)
self.pause_image.set_direction(controls_direction)
play_toolbar = self.builder.get_object('play_toolbar')
play_toolbar.set_direction(controls_direction)
for button in ('playpause', 'next', 'prev', 'stop'):
widget = self.builder.get_object('%s_button' % button)
setattr(self, '%s_button' % button, widget)
widget.get_child().set_direction(controls_direction)
self.progress_bar = playback.SeekProgressBar(player.PLAYER)
self.progress_bar.get_child().set_direction(controls_direction)
# Don't expand vertically; looks awful on Adwaita.
self.progress_bar.set_valign(Gtk.Align.CENTER)
guiutil.gtk_widget_replace(
self.builder.get_object('playback_progressbar_dummy'),
self.progress_bar
)
self.stop_button.toggle_spat = False
self.stop_button.add_events(Gdk.EventMask.POINTER_MOTION_MASK)
self.stop_button.connect('motion-notify-event',
self.on_stop_button_motion_notify_event)
self.stop_button.connect('leave-notify-event',
self.on_stop_button_leave_notify_event)
self.stop_button.connect('key-press-event',
self.on_stop_button_key_press_event)
self.stop_button.connect('key-release-event',
self.on_stop_button_key_release_event)
self.stop_button.connect('focus-out-event',
self.on_stop_button_focus_out_event)
self.stop_button.connect('button-press-event',
self.on_stop_button_press_event)
self.stop_button.connect('button-release-event',
self.on_stop_button_release_event)
self.stop_button.drag_dest_set(Gtk.DestDefaults.ALL,
[Gtk.TargetEntry.new("exaile-index-list", Gtk.TargetFlags.SAME_APP, 0)], Gdk.DragAction.COPY)
self.stop_button.connect('drag-motion',
self.on_stop_button_drag_motion)
self.stop_button.connect('drag-leave',
self.on_stop_button_drag_leave)
self.stop_button.connect('drag-data-received',
self.on_stop_button_drag_data_received)
self.statusbar = info.Statusbar(self.builder.get_object('status_bar'))
event.add_ui_callback(self.on_exaile_loaded, 'exaile_loaded')
def _connect_events(self):
"""
Connects the various events to their handlers
"""
self.builder.connect_signals({
'on_configure_event': self.configure_event,
'on_window_state_event': self.window_state_change_event,
'on_delete_event': self.on_delete_event,
'on_playpause_button_clicked': self._on_playpause_button,
'on_next_button_clicked':
lambda *e: player.QUEUE.next(),
'on_prev_button_clicked':
lambda *e: player.QUEUE.prev(),
'on_about_item_activate': self.on_about_item_activate,
# Controller
# 'on_scan_collection_item_activate': self.controller.on_rescan_collection,
# 'on_device_manager_item_activate': lambda *e: self.controller.show_devices(),
# 'on_track_properties_activate':self.controller.on_track_properties,
})
event.add_ui_callback(self.on_playback_resume, 'playback_player_resume',
player.PLAYER)
event.add_ui_callback(self.on_playback_end, 'playback_player_end',
player.PLAYER)
event.add_ui_callback(self.on_playback_end, 'playback_error',
player.PLAYER)
event.add_ui_callback(self.on_playback_start, 'playback_track_start',
player.PLAYER)
event.add_ui_callback(self.on_toggle_pause, 'playback_toggle_pause',
player.PLAYER)
event.add_ui_callback(self.on_track_tags_changed, 'track_tags_changed')
event.add_ui_callback(self.on_buffering, 'playback_buffering',
player.PLAYER)
event.add_ui_callback(self.on_playback_error, 'playback_error',
player.PLAYER)
event.add_ui_callback(self.on_playlist_tracks_added,
'playlist_tracks_added')
event.add_ui_callback(self.on_playlist_tracks_removed,
'playlist_tracks_removed')
# Settings
self._on_option_set('gui_option_set', settings, 'gui/show_info_area')
self._on_option_set('gui_option_set', settings, 'gui/show_info_area_covers')
event.add_ui_callback(self._on_option_set, 'option_set')
def _connect_panel_events(self):
"""
Sets up panel events
"""
# When there's nothing in the notebook, hide it
self.controller.panel_notebook.connect('page-added', self.on_panel_notebook_add_page)
self.controller.panel_notebook.connect('page-removed', self.on_panel_notebook_remove_page)
# panels
panels = self.controller.panel_notebook.panels
for panel_name in ('playlists', 'radio', 'files', 'collection'):
panel = panels[panel_name].panel
sort = False
if panel_name in ('files', 'collection'):
sort = True
panel.connect('append-items', lambda panel, items, force_play, sort=sort:
self.on_append_items(items, force_play, sort=sort))
panel.connect('queue-items', lambda panel, items, sort=sort:
self.on_append_items(items, queue=True, sort=sort))
panel.connect('replace-items', lambda panel, items, sort=sort:
self.on_append_items(items, replace=True, sort=sort))
## Collection Panel
panel = panels['collection'].panel
panel.connect('collection-tree-loaded', self.on_collection_tree_loaded)
## Playlist Panel
panel = panels['playlists'].panel
panel.connect('playlist-selected',
lambda panel, playlist:
self.playlist_container.create_tab_from_playlist(playlist))
## Radio Panel
panel = panels['radio'].panel
panel.connect('playlist-selected',
lambda panel, playlist:
self.playlist_container.create_tab_from_playlist(playlist))
## Files Panel
#panel = panels['files']
def _update_alpha(self):
if self.alpha_style is None:
return
opac = 1.0 - float(settings.get_option('gui/transparency'))
self.alpha_style.load_from_data(
'.background { ' +
('background-color: alpha(@theme_bg_color, %s);' % opac) +
'}'
)
def do_get_property(self, prop):
if prop.name == 'is-fullscreen':
return self._fullscreen
else:
return GObject.GObject.do_get_property(self, prop)
def do_set_property(self, prop, value):
if prop.name == 'is-fullscreen':
if value:
self.window.fullscreen()
else:
self.window.unfullscreen()
else:
GObject.GObject.do_set_property(self, prop, value)
def on_screen_changed(self, widget, event):
"""
Updates the colormap on screen change
"""
screen = widget.get_screen()
visual = screen.get_rgba_visual() or screen.get_rgb_visual()
self.window.set_visual(visual)
def on_messagebar_response(self, widget, response):
"""
Hides the messagebar if requested
"""
if response == Gtk.ResponseType.CLOSE:
widget.hide()
def on_panel_notebook_add_page(self, notebook, page, page_num):
if self.splitter.get_child1() is None:
self.splitter.pack1(self.controller.panel_notebook)
self.controller.panel_notebook.get_parent() \
.child_set_property(self.controller.panel_notebook, 'shrink', False)
def on_panel_notebook_remove_page(self, notebook, page, page_num):
if notebook.get_n_pages() == 0:
self.splitter.remove(self.controller.panel_notebook)
def on_stop_button_motion_notify_event(self, widget, event):
"""
Sets the hover state and shows SPAT icon
"""
        widget._hovered = True
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
else:
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_leave_notify_event(self, widget, event):
"""
Unsets the hover state and resets the button icon
"""
        widget._hovered = False
        if not widget.is_focus() and \
                not (event.get_state() & Gdk.ModifierType.SHIFT_MASK):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_key_press_event(self, widget, event):
"""
Shows SPAT icon on Shift key press
"""
if event.keyval in (Gdk.KEY_Shift_L, Gdk.KEY_Shift_R):
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
widget.toggle_spat = True
if event.keyval in (Gdk.KEY_space, Gdk.KEY_Return):
if widget.toggle_spat:
self.on_spat_clicked()
else:
player.PLAYER.stop()
def on_stop_button_key_release_event(self, widget, event):
"""
Resets the button icon
"""
if event.keyval in (Gdk.KEY_Shift_L, Gdk.KEY_Shift_R):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
widget.toggle_spat = False
def on_stop_button_focus_out_event(self, widget, event):
"""
Resets the button icon unless
the button is still hovered
"""
        if not getattr(widget, '_hovered', False):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_press_event(self, widget, event):
"""
Called when the user clicks on the stop button
"""
if event.button == 1:
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
self.on_spat_clicked()
elif event.button == 3:
menu = guiutil.Menu()
menu.append(_("Toggle: Stop after Selected Track"),
self.on_spat_clicked,
'process-stop')
menu.popup(None, None, None, None, event.button, event.time)
def on_stop_button_release_event(self, widget, event):
"""
Called when the user releases the mouse from the stop button
"""
rect = widget.get_allocation()
if 0 <= event.x < rect.width and 0 <= event.y < rect.height:
player.PLAYER.stop()
def on_stop_button_drag_motion(self, widget, context, x, y, time):
"""
Indicates possible SPAT during drag motion of tracks
"""
target = widget.drag_dest_find_target(context, widget.drag_dest_get_target_list()).name()
if target == 'exaile-index-list':
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
def on_stop_button_drag_leave(self, widget, context, time):
"""
Resets the stop button
"""
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_drag_data_received(self, widget, context, x, y, selection, info, time):
"""
Allows for triggering the SPAT feature
by dropping tracks on the stop button
"""
source_widget = Gtk.drag_get_source_widget(context)
if selection.target.name() == 'exaile-index-list' and isinstance(source_widget, PlaylistView):
position = int(selection.data.split(',')[0])
if position == source_widget.playlist.spat_position:
position = -1
source_widget.playlist.spat_position = position
source_widget.queue_draw()
def on_spat_clicked(self, *e):
"""
Called when the user clicks on the SPAT item
"""
trs = self.get_selected_page().view.get_selected_items()
if not trs: return
# TODO: this works, but implement this some other way in the future
if player.QUEUE.current_playlist.spat_position == -1:
player.QUEUE.current_playlist.spat_position = trs[0][0]
else:
player.QUEUE.current_playlist.spat_position = -1
self.get_selected_page().view.queue_draw()
def on_append_items(self, tracks, force_play=False, queue=False, sort=False, replace=False):
"""
Called when a panel (or other component)
has tracks to append and possibly queue
:param tracks: The tracks to append
:param force_play: Force playing the first track if there
is no track currently playing. Otherwise
check a setting to determine whether the
track should be played
:param queue: Additionally queue tracks
:param sort: Sort before adding
:param replace: Clear playlist before adding
"""
if len(tracks) == 0:
return
page = self.get_selected_page()
if sort:
tracks = trax.sort_tracks(
('artist', 'date', 'album', 'discnumber', 'tracknumber'),
tracks)
if replace:
page.playlist.clear()
offset = len(page.playlist)
page.playlist.extend(tracks)
# extending the queue automatically starts playback
if queue:
if player.QUEUE is not page.playlist:
player.QUEUE.extend(tracks)
elif (force_play or settings.get_option( 'playlist/append_menu_starts_playback', False )) and \
not player.PLAYER.current:
page.view.play_track_at(offset, tracks[0])
def on_playback_error(self, type, player, message):
"""
Called when there has been a playback error
"""
self.message.show_error(_('Playback error encountered!'), message)
def on_buffering(self, type, player, percent):
"""
Called when a stream is buffering
"""
percent = min(percent, 100)
self.statusbar.set_status(_("Buffering: %d%%...") % percent, 1)
def on_track_tags_changed(self, type, track, tag):
"""
Called when tags are changed
"""
if track is player.PLAYER.current:
self._update_track_information()
def on_collection_tree_loaded(self, tree):
"""
Updates information on collection tree load
"""
self.statusbar.update_info()
def on_exaile_loaded(self, event_type, exaile, nothing):
"""
Updates information on exaile load
"""
self.statusbar.update_info()
event.remove_callback(self.on_exaile_loaded, 'exaile_loaded')
def on_playlist_tracks_added(self, type, playlist, tracks):
"""
Updates information on track add
"""
self.statusbar.update_info()
def on_playlist_tracks_removed(self, type, playlist, tracks):
"""
Updates information on track removal
"""
self.statusbar.update_info()
def on_toggle_pause(self, type, player, object):
"""
Called when the user clicks the play button after playback has
already begun
"""
if player.is_paused():
image = self.play_image
tooltip = _('Continue Playback')
else:
image = self.pause_image
tooltip = _('Pause Playback')
self.playpause_button.set_image(image)
self.playpause_button.set_tooltip_text(tooltip)
self._update_track_information()
def on_playlist_container_switch_page(self, notebook, page, page_num):
"""
Updates info after notebook page switch
"""
page = notebook.get_nth_page(page_num)
selection = page.view.get_selection()
selection.connect('changed', self.on_playlist_view_selection_changed)
self.statusbar.update_info()
def on_playlist_view_selection_changed(self, selection):
"""
Updates info after playlist page selection change
"""
self.statusbar.update_info()
def on_panel_filter_focus(self, *e):
"""
Gives focus to the filter field of the current panel
"""
try:
self.controller.get_active_panel().filter.grab_focus()
except (AttributeError, KeyError):
pass
def on_search_playlist_focus(self, *e):
"""
Gives focus to the playlist search bar
"""
plpage = get_selected_playlist()
if plpage:
plpage.get_search_entry().grab_focus()
def on_save_playlist(self, *e):
"""
Called when the user presses Ctrl+S
Spawns the save dialog of the currently selected playlist tab if
not custom, saves changes directly if custom
"""
tab = self.get_selected_tab()
if not tab: return
if tab.page.playlist.get_is_custom():
tab.do_save_changes_to_custom()
else:
tab.do_save_custom()
def on_save_playlist_as(self, *e):
"""
            Called when the user presses Ctrl+Shift+S
Spawns the save as dialog of the current playlist tab
"""
tab = self.get_selected_tab()
if not tab: return
tab.do_save_custom()
def on_clear_playlist(self, *e):
"""
Clears the current playlist tab
"""
page = self.get_selected_page()
if page:
page.playlist.clear()
def on_open_item_activate(self, menuitem):
"""
Shows a dialog to open media
"""
def on_uris_selected(dialog, uris):
uris.reverse()
if len(uris) > 0:
self.controller.open_uri(uris.pop(), play=True)
for uri in uris:
self.controller.open_uri(uri, play=False)
dialog = dialogs.MediaOpenDialog(self.window)
dialog.connect('uris-selected', on_uris_selected)
dialog.show()
def on_open_url_item_activate(self, menuitem):
"""
        Shows a dialog to open a URI
"""
def on_uri_selected(dialog, uri):
self.controller.open_uri(uri, play=False)
dialog = dialogs.URIOpenDialog(self.window)
dialog.connect('uri-selected', on_uri_selected)
dialog.show()
def on_open_directories_item_activate(self, menuitem):
"""
Shows a dialog to open directories
"""
def on_uris_selected(dialog, uris):
uris.reverse()
if len(uris) > 0:
self.controller.open_uri(uris.pop(), play=True)
for uri in uris:
self.controller.open_uri(uri, play=False)
dialog = dialogs.DirectoryOpenDialog(self.window)
# Selecting empty folders is useless
dialog.props.create_folders = False
dialog.connect('uris-selected', on_uris_selected)
dialog.show()
def on_export_current_playlist_activate(self, menuitem):
"""
Shows a dialog to export the current playlist
"""
page = self.get_selected_page()
if not page or not isinstance(page, PlaylistPage):
return
def on_message(dialog, message_type, message):
"""
Show messages in the main window message area
"""
if message_type == Gtk.MessageType.INFO:
self.message.show_info(markup=message)
elif message_type == Gtk.MessageType.ERROR:
self.message.show_error(_('Playlist export failed!'), message)
return True
dialog = dialogs.PlaylistExportDialog(page.playlist, self.window)
dialog.connect('message', on_message)
dialog.show()
def on_playlist_utilities_bar_visible_toggled(self, checkmenuitem):
"""
Shows or hides the playlist utilities bar
"""
settings.set_option('gui/playlist_utilities_bar_visible',
checkmenuitem.get_active())
def on_show_playing_track_item_activate(self, menuitem):
"""
Tries to show the currently playing track
"""
self.playlist_container.show_current_track()
def on_about_item_activate(self, menuitem):
"""
Shows the about dialog
"""
dialog = dialogs.AboutDialog(self.window)
dialog.show()
def on_playback_resume(self, type, player, data):
self.resuming = True
def on_playback_start(self, type, player, object):
"""
Called when playback starts
Sets the currently playing track visible in the currently selected
playlist if the user has chosen this setting
"""
if self.resuming:
self.resuming = False
return
self._update_track_information()
self.playpause_button.set_image(self.pause_image)
self.playpause_button.set_tooltip_text(_('Pause Playback'))
def on_playback_end(self, type, player, object):
"""
Called when playback ends
"""
self.window.set_title('Exaile')
self.playpause_button.set_image(self.play_image)
self.playpause_button.set_tooltip_text(_('Start Playback'))
def _on_option_set(self, name, object, option):
"""
Handles changes of settings
"""
if option == 'gui/main_window_title_format':
self.title_formatter.props.format = settings.get_option(
option, self.title_formatter.props.format)
elif option == 'gui/use_tray':
usetray = settings.get_option(option, False)
if self.controller.tray_icon and not usetray:
self.controller.tray_icon.destroy()
self.controller.tray_icon = None
elif not self.controller.tray_icon and usetray:
self.controller.tray_icon = tray.TrayIcon(self)
elif option == 'gui/show_info_area':
self.info_area.set_no_show_all(False)
if settings.get_option(option, True):
self.info_area.show_all()
else:
self.info_area.hide()
self.info_area.set_no_show_all(True)
elif option == 'gui/show_info_area_covers':
cover = self.info_area.cover
cover.set_no_show_all(False)
if settings.get_option(option, True):
cover.show_all()
else:
cover.hide()
cover.set_no_show_all(True)
elif option == 'gui/transparency':
self._update_alpha()
def _on_volume_key(self, is_up):
        diff = int(100 * settings.get_option('gui/volume_key_step_size', VOLUME_STEP_DEFAULT))
if not is_up: diff = -diff
player.PLAYER.modify_volume(diff)
return True
def _on_seek_key(self, is_forward):
diff = settings.get_option('gui/seek_key_step_size', SEEK_STEP_DEFAULT)
if not is_forward: diff = -diff
if player.PLAYER.current:
player.PLAYER.modify_time(diff)
self.progress_bar.update_progress()
return True
def _on_prev_tab_key(self, *e):
self.playlist_container.get_current_notebook().select_prev_tab()
return True
def _on_next_tab_key(self, *e):
self.playlist_container.get_current_notebook().select_next_tab()
return True
def _on_playpause_button(self, *e):
self.playpause()
return True
def _on_focus_playlist_tab(self, tab_nr):
self.playlist_container.get_current_notebook().focus_tab(tab_nr)
return True
def _on_focus_playlist_container(self, *_e):
self.playlist_container.focus()
return True
def _update_track_information(self):
"""
Sets track information
"""
track = player.PLAYER.current
if not track:
return
self.window.set_title(self.title_formatter.format(track))
def playpause(self):
"""
Pauses the playlist if it is playing, starts playing if it is
paused. If stopped, try to start playing the next suitable track.
"""
if player.PLAYER.is_paused() or player.PLAYER.is_playing():
player.PLAYER.toggle_pause()
else:
pl = self.get_selected_page()
player.QUEUE.set_current_playlist(pl.playlist)
try:
trackpath = pl.view.get_selected_paths()[0]
pl.playlist.current_position = trackpath[0]
except IndexError:
pass
player.QUEUE.play(track=pl.playlist.current)
def _setup_position(self):
"""
        Sets up the position and size based on the size the window had
        when it was last moved or resized
"""
if settings.get_option('gui/mainw_maximized', False):
self.window.maximize()
width = settings.get_option('gui/mainw_width', 500)
height = settings.get_option('gui/mainw_height', 475)
x = settings.get_option('gui/mainw_x', 10)
y = settings.get_option('gui/mainw_y', 10)
self.window.move(x, y)
self.window.resize(width, height)
pos = settings.get_option('gui/mainw_sash_pos', 200)
self.splitter.set_position(pos)
def on_delete_event(self, *e):
"""
Called when the user attempts to close the window
"""
sash_pos = self.splitter.get_position()
if sash_pos > 10:
settings.set_option('gui/mainw_sash_pos', sash_pos)
if settings.get_option('gui/use_tray', False) and \
settings.get_option('gui/close_to_tray', False):
self.window.hide()
else:
self.quit()
return True
def quit(self, *e):
"""
Quits Exaile
"""
self.window.hide()
GLib.idle_add(self.controller.exaile.quit)
return True
def on_restart_item_activate(self, menuitem):
"""
Restarts Exaile
"""
self.window.hide()
GLib.idle_add(self.controller.exaile.quit, True)
def toggle_visible(self, bringtofront=False):
"""
Toggles visibility of the main window
"""
toggle_handled = self.emit('main-visible-toggle')
if not toggle_handled:
if bringtofront and self.window.is_active() or \
not bringtofront and self.window.get_property('visible'):
self.window.hide()
else:
# the ordering for deiconify/show matters -- if this gets
# switched, then the minimization detection breaks
self.window.deiconify()
self.window.show()
def configure_event(self, *e):
"""
Called when the window is resized or moved
"""
# Don't save window size if it is maximized or fullscreen.
if settings.get_option('gui/mainw_maximized', False) or \
self._fullscreen:
return False
(width, height) = self.window.get_size()
if [width, height] != [ settings.get_option("gui/mainw_"+key, -1) for \
key in ["width", "height"] ]:
settings.set_option('gui/mainw_height', height, save=False)
settings.set_option('gui/mainw_width', width, save=False)
(x, y) = self.window.get_position()
if [x, y] != [ settings.get_option("gui/mainw_"+key, -1) for \
key in ["x", "y"] ]:
settings.set_option('gui/mainw_x', x, save=False)
settings.set_option('gui/mainw_y', y, save=False)
return False
def window_state_change_event(self, window, event):
"""
Saves the current maximized and fullscreen
states and minimizes to tray if requested
"""
if event.changed_mask & Gdk.WindowState.MAXIMIZED:
settings.set_option('gui/mainw_maximized',
bool(event.new_window_state & Gdk.WindowState.MAXIMIZED))
if event.changed_mask & Gdk.WindowState.FULLSCREEN:
self._fullscreen = bool(event.new_window_state & Gdk.WindowState.FULLSCREEN)
self.notify('is-fullscreen')
# detect minimization state changes
prev_minimized = self.minimized
if not self.minimized:
if event.changed_mask & Gdk.WindowState.ICONIFIED and \
not event.changed_mask & Gdk.WindowState.WITHDRAWN and \
event.new_window_state & Gdk.WindowState.ICONIFIED and \
not event.new_window_state & Gdk.WindowState.WITHDRAWN and \
not self.window_state & Gdk.WindowState.ICONIFIED:
self.minimized = True
else:
if event.changed_mask & Gdk.WindowState.WITHDRAWN and \
not event.new_window_state & (Gdk.WindowState.WITHDRAWN): #and \
self.minimized = False
# track this
self.window_state = event.new_window_state
if settings.get_option('gui/minimize_to_tray', False):
# old code to detect minimization
# -> it must have worked at some point, perhaps this is a GTK version
# specific set of behaviors? Current code works now on 2.24.17
#if wm_state is not None:
# if '_NET_WM_STATE_HIDDEN' in wm_state[2]:
# show tray
# window.hide
#else
# destroy tray
if self.minimized != prev_minimized and self.minimized == True:
if not settings.get_option('gui/use_tray', False) and \
self.controller.tray_icon is None:
self.controller.tray_icon = tray.TrayIcon(self)
window.hide()
elif not settings.get_option('gui/use_tray', False) and \
self.controller.tray_icon is not None:
self.controller.tray_icon.destroy()
self.controller.tray_icon = None
return False
def get_selected_page(self):
"""
        Returns the currently displayed playlist notebook page
"""
return self.playlist_container.get_current_tab()
def get_selected_playlist(self):
try:
page = self.get_selected_page()
except AttributeError:
return None
if not isinstance(page, PlaylistPage):
return None
return page
class MainWindowTrackInfoPane(info.TrackInfoPane, providers.ProviderHandler):
"""
Extends the regular track info pane by an area for custom widgets
The mainwindow-info-area-widget provider is used to show widgets
on the right of the info area. They should be small. The registered
provider should provide a method 'create_widget' that takes the info
area instance as a parameter, and that returns a Gtk.Widget to be
inserted into the widget_area of the info area, and an attribute
'name' that will be used when removing the provider.
"""
def __init__(self, player):
info.TrackInfoPane.__init__(self, player)
self.__player = player
self.widget_area = Gtk.Box()
self.get_child().pack_start(self.widget_area, False, False, 0)
self.__widget_area_widgets = {}
# call this last if we're using simple_init=True
providers.ProviderHandler.__init__(self, 'mainwindow-info-area-widget',
target=player, simple_init=True)
def get_player(self):
'''
Retrieves the player object that this info area
is associated with
'''
return self._TrackInfoPane__player
def on_provider_added(self, provider):
name = provider.name
widget = provider.create_widget(self)
old_widget = self.__widget_area_widgets.get(name)
if old_widget is not None:
self.widget_area.remove(old_widget)
old_widget.destroy()
self.__widget_area_widgets[name] = widget
self.widget_area.pack_start(widget, False, False, 0)
widget.show_all()
def on_provider_removed(self, provider):
widget = self.__widget_area_widgets.pop(provider.name, None)
if widget is not None:
self.widget_area.remove(widget)
widget.destroy()
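# Illustrative sketch (not shipped with Exaile): a minimal provider for the
# 'mainwindow-info-area-widget' service described in the class docstring above.
# The class name and label text are invented for demonstration; a plugin would
# typically register an instance with
# providers.register('mainwindow-info-area-widget', provider) and unregister it
# again on teardown.
class _ExampleInfoAreaWidgetProvider(object):
    """Adds a small static label next to the cover and volume controls."""
    name = 'example-info-area-label'

    def create_widget(self, info_area):
        # info_area is the MainWindowTrackInfoPane instance; any compact
        # Gtk.Widget returned here is packed into its widget_area.
        return Gtk.Label(label='example')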
def get_playlist_container():
return MainWindow._mainwindow.playlist_container
def get_playlist_notebook():
'''Retrieves the primary playlist notebook'''
return MainWindow._mainwindow.playlist_container.notebooks[0]
def get_selected_page():
return MainWindow._mainwindow.get_selected_page()
def get_selected_playlist():
return MainWindow._mainwindow.get_selected_playlist()
def mainwindow():
return MainWindow._mainwindow
# vim: et sts=4 sw=4
| gpl-2.0 |
chvogl/tardis | tardis/io/config_reader.py | 1 | 40145 | # Module to read the rather complex config data
import logging
import os
import pprint
from astropy import constants, units as u
import numpy as np
import pandas as pd
import yaml
import tardis
from tardis.io.model_reader import read_density_file, \
calculate_density_after_time, read_abundances_file
from tardis.io.config_validator import ConfigurationValidator
from tardis import atomic
from tardis.util import species_string_to_tuple, parse_quantity, \
element_symbol2atomic_number
import copy
pp = pprint.PrettyPrinter(indent=4)
logger = logging.getLogger(__name__)
data_dir = os.path.join(tardis.__path__[0], 'data')
default_config_definition_file = os.path.join(data_dir,
'tardis_config_definition.yml')
#File parsers for different file formats:
density_structure_fileparser = {}
inv_ni56_efolding_time = 1 / (8.8 * u.day)
inv_co56_efolding_time = 1 / (113.7 * u.day)
inv_cr48_efolding_time = 1 / (1.29602 * u.day)
inv_v48_efolding_time = 1 / (23.0442 * u.day)
inv_fe52_efolding_time = 1 / (0.497429 * u.day)
inv_mn52_efolding_time = 1 / (0.0211395 * u.day)
class ConfigurationError(ValueError):
pass
def parse_quantity_linspace(quantity_linspace_dictionary, add_one=True):
"""
parse a dictionary of the following kind
{'start': 5000 km/s,
'stop': 10000 km/s,
'num': 1000}
Parameters
----------
quantity_linspace_dictionary: ~dict
add_one: boolean, default: True
Returns
-------
~np.array
"""
start = parse_quantity(quantity_linspace_dictionary['start'])
stop = parse_quantity(quantity_linspace_dictionary['stop'])
try:
stop = stop.to(start.unit)
except u.UnitsError:
raise ConfigurationError('"start" and "stop" keyword must be compatible quantities')
num = quantity_linspace_dictionary['num']
if add_one:
num += 1
return np.linspace(start.value, stop.value, num=num) * start.unit
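# Example (illustrative only, not used elsewhere in this module): with the
# default add_one=True the 'num' entry is treated as a number of cells, so one
# extra boundary is returned:
#
#     parse_quantity_linspace({'start': '1.1e4 km/s',
#                              'stop': '2.0e4 km/s',
#                              'num': 20})
#     # -> Quantity array of 21 velocities from 1.1e4 to 2.0e4 km/s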
def parse_spectral_bin(spectral_bin_boundary_1, spectral_bin_boundary_2):
spectral_bin_boundary_1 = parse_quantity(spectral_bin_boundary_1).to('Angstrom', u.spectral())
spectral_bin_boundary_2 = parse_quantity(spectral_bin_boundary_2).to('Angstrom', u.spectral())
spectrum_start_wavelength = min(spectral_bin_boundary_1, spectral_bin_boundary_2)
spectrum_end_wavelength = max(spectral_bin_boundary_1, spectral_bin_boundary_2)
return spectrum_start_wavelength, spectrum_end_wavelength
def calculate_exponential_density(velocities, v_0, rho0):
"""
This function computes the exponential density profile.
:math:`\\rho = \\rho_0 \\times \\exp \\left( -\\frac{v}{v_0} \\right)`
Parameters
----------
velocities : ~astropy.Quantity
Array like velocity profile
    v_0 : ~astropy.Quantity
reference velocity
rho0 : ~astropy.Quantity
reference density
Returns
-------
densities : ~astropy.Quantity
"""
densities = rho0 * np.exp(-(velocities / v_0))
return densities
def calculate_power_law_density(velocities, velocity_0, rho_0, exponent):
"""
    This function computes a discrete power-law density profile.
:math:`\\rho = \\rho_0 \\times \\left( \\frac{v}{v_0} \\right)^n`
Parameters
----------
velocities : ~astropy.Quantity
Array like velocity profile
velocity_0 : ~astropy.Quantity
reference velocity
    rho_0 : ~astropy.Quantity
        reference density
    exponent : ~float
        exponent used in the power law
Returns
-------
densities : ~astropy.Quantity
"""
densities = rho_0 * np.power((velocities / velocity_0), exponent)
return densities
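# Worked example (illustrative only; the numbers are made up): both helpers
# above are pure functions of Quantity arrays, so they can be checked in
# isolation from any configuration file.
#
#     v = u.Quantity([10000, 11000, 12000], 'km/s')
#     rho_0 = u.Quantity(1e-13, 'g/cm^3')
#     calculate_power_law_density(v, u.Quantity(10000, 'km/s'), rho_0, -7)
#     # -> roughly [1.00e-13, 5.13e-14, 2.79e-14] g/cm^3
#     calculate_exponential_density(v, u.Quantity(10000, 'km/s'), rho_0)
#     # -> roughly [3.68e-14, 3.33e-14, 3.01e-14] g/cm^3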
def parse_model_file_section(model_setup_file_dict, time_explosion):
def parse_artis_model_setup_files(model_file_section_dict, time_explosion):
###### Reading the structure part of the ARTIS file pair
structure_fname = model_file_section_dict['structure_fname']
for i, line in enumerate(file(structure_fname)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
artis_model_columns = ['velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
'cr48_fraction']
artis_model = np.recfromtxt(structure_fname, skip_header=2, usecols=(1, 2, 4, 5, 6, 7), unpack=True,
dtype=[(item, np.float64) for item in artis_model_columns])
        #converting densities from log(g/cm^3) to g/cm^3 and stretching it to the current time
velocities = u.Quantity(np.append([0], artis_model['velocities']), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS model structure file %s (no_of_shells=length of dataset)', structure_fname)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % structure_fname)
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', structure_fname,
no_of_shells, sum(masses.value))
if 'v_lowest' in model_file_section_dict:
v_lowest = parse_quantity(model_file_section_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in model_file_section_dict:
v_highest = parse_quantity(model_file_section_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
artis_model = artis_model[min_shell:max_shell]
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
###### Reading the abundance part of the ARTIS file pair
abundances_fname = model_file_section_dict['abundances_fname']
abundances = pd.DataFrame(np.loadtxt(abundances_fname)[min_shell:max_shell, 1:].transpose(),
index=np.arange(1, 31))
ni_stable = abundances.ix[28] - artis_model['ni56_fraction']
co_stable = abundances.ix[27] - artis_model['co56_fraction']
fe_stable = abundances.ix[26] - artis_model['fe52_fraction']
mn_stable = abundances.ix[25] - 0.0
cr_stable = abundances.ix[24] - artis_model['cr48_fraction']
v_stable = abundances.ix[23] - 0.0
ti_stable = abundances.ix[22] - 0.0
abundances.ix[28] = ni_stable
abundances.ix[28] += artis_model['ni56_fraction'] * np.exp(
-(time_explosion * inv_ni56_efolding_time).to(1).value)
abundances.ix[27] = co_stable
abundances.ix[27] += artis_model['co56_fraction'] * np.exp(
-(time_explosion * inv_co56_efolding_time).to(1).value)
abundances.ix[27] += (inv_ni56_efolding_time * artis_model['ni56_fraction'] /
(inv_ni56_efolding_time - inv_co56_efolding_time)) * \
(np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_ni56_efolding_time * time_explosion).to(1).value))
abundances.ix[26] = fe_stable
abundances.ix[26] += artis_model['fe52_fraction'] * np.exp(
-(time_explosion * inv_fe52_efolding_time).to(1).value)
abundances.ix[26] += ((artis_model['co56_fraction'] * inv_ni56_efolding_time
- artis_model['co56_fraction'] * inv_co56_efolding_time
+ artis_model['ni56_fraction'] * inv_ni56_efolding_time
- artis_model['ni56_fraction'] * inv_co56_efolding_time
- artis_model['co56_fraction'] * inv_ni56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['co56_fraction'] * inv_co56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
- artis_model['ni56_fraction'] * inv_ni56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['ni56_fraction'] * inv_co56_efolding_time * np.exp(
-(inv_ni56_efolding_time * time_explosion).to(1).value))
/ (inv_ni56_efolding_time - inv_co56_efolding_time))
abundances.ix[25] = mn_stable
abundances.ix[25] += (inv_fe52_efolding_time * artis_model['fe52_fraction'] /
(inv_fe52_efolding_time - inv_mn52_efolding_time)) * \
(np.exp(-(inv_mn52_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_fe52_efolding_time * time_explosion).to(1).value))
abundances.ix[24] = cr_stable
abundances.ix[24] += artis_model['cr48_fraction'] * np.exp(
-(time_explosion * inv_cr48_efolding_time).to(1).value)
abundances.ix[24] += ((artis_model['fe52_fraction'] * inv_fe52_efolding_time
- artis_model['fe52_fraction'] * inv_mn52_efolding_time
- artis_model['fe52_fraction'] * inv_fe52_efolding_time * np.exp(
-(inv_mn52_efolding_time * time_explosion).to(1).value)
+ artis_model['fe52_fraction'] * inv_mn52_efolding_time * np.exp(
-(inv_fe52_efolding_time * time_explosion).to(1).value))
/ (inv_fe52_efolding_time - inv_mn52_efolding_time))
abundances.ix[23] = v_stable
abundances.ix[23] += (inv_cr48_efolding_time * artis_model['cr48_fraction'] /
(inv_cr48_efolding_time - inv_v48_efolding_time)) * \
(np.exp(-(inv_v48_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_cr48_efolding_time * time_explosion).to(1).value))
abundances.ix[22] = ti_stable
abundances.ix[22] += ((artis_model['cr48_fraction'] * inv_cr48_efolding_time
- artis_model['cr48_fraction'] * inv_v48_efolding_time
- artis_model['cr48_fraction'] * inv_cr48_efolding_time * np.exp(
-(inv_v48_efolding_time * time_explosion).to(1).value)
+ artis_model['cr48_fraction'] * inv_v48_efolding_time * np.exp(
-(inv_cr48_efolding_time * time_explosion).to(1).value))
/ (inv_cr48_efolding_time - inv_v48_efolding_time))
if 'split_shells' in model_file_section_dict:
split_shells = int(model_file_section_dict['split_shells'])
else:
split_shells = 1
if split_shells > 1:
logger.info('Increasing the number of shells by a factor of %s' % split_shells)
no_of_shells = len(v_inner)
velocities = np.linspace(v_inner[0], v_outer[-1], no_of_shells * split_shells + 1)
v_inner = velocities[:-1]
v_outer = velocities[1:]
old_mean_densities = mean_densities
mean_densities = np.empty(no_of_shells * split_shells) * old_mean_densities.unit
new_abundance_data = np.empty((abundances.values.shape[0], no_of_shells * split_shells))
for i in xrange(split_shells):
mean_densities[i::split_shells] = old_mean_densities
new_abundance_data[:, i::split_shells] = abundances.values
abundances = pd.DataFrame(new_abundance_data, index=abundances.index)
#def parser_simple_ascii_model
return v_inner, v_outer, mean_densities, abundances
model_file_section_parser = {}
model_file_section_parser['artis'] = parse_artis_model_setup_files
try:
parser = model_file_section_parser[model_setup_file_dict['type']]
except KeyError:
        raise ConfigurationError('In model file section only types %s are allowed (supplied %s) ' %
                                 (model_file_section_parser.keys(), model_setup_file_dict['type']))
return parser(model_setup_file_dict, time_explosion)
def parse_density_file_section(density_file_dict, time_explosion):
density_file_parser = {}
def parse_artis_density(density_file_dict, time_explosion):
density_file = density_file_dict['name']
for i, line in enumerate(file(density_file)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
velocities, mean_densities_0 = np.recfromtxt(density_file, skip_header=2, usecols=(1, 2), unpack=True)
        #converting densities from log(g/cm^3) to g/cm^3 and stretching it to the current time
velocities = u.Quantity(np.append([0], velocities), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** mean_densities_0, 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS file %s (no_of_shells=length of dataset)', density_file)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % density_file)
min_shell = 1
max_shell = no_of_shells
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', density_file,
no_of_shells, sum(masses.value))
if 'v_lowest' in density_file_dict:
v_lowest = parse_quantity(density_file_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in density_file_dict:
v_highest = parse_quantity(density_file_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
return v_inner, v_outer, mean_densities, min_shell, max_shell
density_file_parser['artis'] = parse_artis_density
try:
parser = density_file_parser[density_file_dict['type']]
except KeyError:
        raise ConfigurationError('In density file section only types %s are allowed (supplied %s) ' %
                                 (density_file_parser.keys(), density_file_dict['type']))
return parser(density_file_dict, time_explosion)
def parse_density_section(density_dict, v_inner, v_outer, time_explosion):
density_parser = {}
#Parse density uniform
def parse_uniform(density_dict, v_inner, v_outer, time_explosion):
no_of_shells = len(v_inner)
return density_dict['value'].to('g cm^-3') * np.ones(no_of_shells)
density_parser['uniform'] = parse_uniform
#Parse density branch85 w7
def parse_branch85(density_dict, v_inner, v_outer, time_explosion):
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_power_law_density(velocities,
density_dict['w7_v_0'],
density_dict['w7_rho_0'], -7)
densities = calculate_density_after_time(densities,
density_dict['w7_time_0'],
time_explosion)
return densities
density_parser['branch85_w7'] = parse_branch85
def parse_power_law(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0')
rho_0 = density_dict.pop('rho_0')
v_0 = density_dict.pop('v_0')
exponent = density_dict.pop('exponent')
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_power_law_density(velocities, v_0, rho_0, exponent)
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities
density_parser['power_law'] = parse_power_law
def parse_exponential(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0')
rho_0 = density_dict.pop('rho_0')
v_0 = density_dict.pop('v_0')
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_exponential_density(velocities, v_0, rho_0)
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities
density_parser['exponential'] = parse_exponential
try:
parser = density_parser[density_dict['type']]
except KeyError:
raise ConfigurationError('In density section only types %s are allowed (supplied %s) ' %
(density_parser.keys(), density_dict['type']))
return parser(density_dict, v_inner, v_outer, time_explosion)
def parse_abundance_file_section(abundance_file_dict, abundances, min_shell, max_shell):
abundance_file_parser = {}
def parse_artis(abundance_file_dict, abundances, min_shell, max_shell):
#### ---- debug ----
time_of_model = 0.0
####
fname = abundance_file_dict['name']
max_atom = 30
logger.info("Parsing ARTIS Abundance section from shell %d to %d", min_shell, max_shell)
abundances.values[:max_atom, :] = np.loadtxt(fname)[min_shell:max_shell, 1:].transpose()
return abundances
abundance_file_parser['artis'] = parse_artis
try:
parser = abundance_file_parser[abundance_file_dict['type']]
except KeyError:
raise ConfigurationError('In abundance file section only types %s are allowed (supplied %s) ' %
(abundance_file_parser.keys(), abundance_file_dict['type']))
return parser(abundance_file_dict, abundances, min_shell, max_shell)
def parse_supernova_section(supernova_dict):
"""
Parse the supernova section
Parameters
----------
supernova_dict: dict
YAML parsed supernova dict
Returns
-------
config_dict: dict
"""
config_dict = {}
#parse luminosity
luminosity_value, luminosity_unit = supernova_dict['luminosity_requested'].strip().split()
if luminosity_unit == 'log_lsun':
config_dict['luminosity_requested'] = 10 ** (
float(luminosity_value) + np.log10(constants.L_sun.cgs.value)) * u.erg / u.s
else:
config_dict['luminosity_requested'] = (float(luminosity_value) * u.Unit(luminosity_unit)).to('erg/s')
config_dict['time_explosion'] = parse_quantity(supernova_dict['time_explosion']).to('s')
if 'distance' in supernova_dict:
config_dict['distance'] = parse_quantity(supernova_dict['distance'])
else:
config_dict['distance'] = None
if 'luminosity_wavelength_start' in supernova_dict:
config_dict['luminosity_nu_end'] = parse_quantity(supernova_dict['luminosity_wavelength_start']). \
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_end'] = np.inf * u.Hz
if 'luminosity_wavelength_end' in supernova_dict:
config_dict['luminosity_nu_start'] = parse_quantity(supernova_dict['luminosity_wavelength_end']). \
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_start'] = 0.0 * u.Hz
return config_dict
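# Worked example (values are hypothetical but shaped like the input this parser
# expects): a YAML supernova section such as
#     luminosity_requested: 9.44 log_lsun
#     time_explosion: 13 day
# is converted to cgs-compatible quantities, roughly
#     config_dict['luminosity_requested'] ~ 1.06e43 erg / s   (10**9.44 * L_sun)
#     config_dict['time_explosion']       = 1123200.0 s       (13 * 86400)
# The 'log_lsun' branch interprets the number as log10(L / L_sun); any other
# unit string is parsed directly and converted to erg/s.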
def parse_spectrum_list2dict(spectrum_list):
"""
    Parse the spectrum list [start, stop, num] to a dict
"""
if spectrum_list[0].unit.physical_type != 'length' and \
spectrum_list[1].unit.physical_type != 'length':
raise ValueError('start and end of spectrum need to be a length')
spectrum_config_dict = {}
spectrum_config_dict['start'] = spectrum_list[0]
spectrum_config_dict['end'] = spectrum_list[1]
spectrum_config_dict['bins'] = spectrum_list[2]
spectrum_frequency = np.linspace(
spectrum_config_dict['end'].to('Hz', u.spectral()),
spectrum_config_dict['start'].to('Hz', u.spectral()),
num=spectrum_config_dict['bins'] + 1)
spectrum_config_dict['frequency'] = spectrum_frequency
return spectrum_config_dict
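# Usage sketch (quantities are illustrative; ``u`` is the astropy units module
# already imported by this file):
#     spectrum = parse_spectrum_list2dict(
#         [u.Quantity(500, 'angstrom'), u.Quantity(20000, 'angstrom'), 10000])
#     spectrum['start'], spectrum['end']  # the input wavelength bounds
#     spectrum['bins']                    # 10000
#     spectrum['frequency']               # 10001 bin edges in Hz, ascending from
#                                         # the frequency of 'end' to that of 'start'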
def parse_convergence_section(convergence_section_dict):
"""
Parse the convergence section dictionary
Parameters
----------
convergence_section_dict: ~dict
dictionary
"""
convergence_parameters = ['damping_constant', 'threshold', 'fraction',
'hold_iterations']
for convergence_variable in ['t_inner', 't_rad', 'w']:
if convergence_variable not in convergence_section_dict:
convergence_section_dict[convergence_variable] = {}
convergence_variable_section = convergence_section_dict[convergence_variable]
for param in convergence_parameters:
if convergence_variable_section.get(param, None) is None:
if param in convergence_section_dict:
convergence_section_dict[convergence_variable][param] = (
convergence_section_dict[param])
return convergence_section_dict
def calculate_w7_branch85_densities(velocities, time_explosion, time_0=19.9999584, density_coefficient=3e29):
"""
    Generate densities from the fit to W7 in Branch 85, page 620 (citation missing)
Parameters
----------
velocities : `~numpy.ndarray`
velocities in cm/s
time_explosion : `float`
time since explosion needed to descale density with expansion
time_0 : `float`
time in seconds of the w7 model - default 19.999, no reason to change
density_coefficient : `float`
coefficient for the polynomial - obtained by fitting to W7, no reason to change
"""
densities = density_coefficient * (velocities * 1e-5) ** -7
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities[1:]
class ConfigurationNameSpace(dict):
"""
    The configuration name space class wraps a dictionary and adds
    utility functions for easy access. Accesses like a.b.c are then possible.
Code from http://goo.gl/KIaq8I
Parameters
----------
config_dict: ~dict
configuration dictionary
Returns
-------
config_ns: ConfigurationNameSpace
"""
@classmethod
def from_yaml(cls, fname):
"""
Read a configuration from a YAML file
Parameters
----------
fname: str
filename or path
"""
try:
yaml_dict = yaml.load(file(fname))
except IOError as e:
logger.critical('No config file named: %s', fname)
raise e
return cls.from_config_dict(yaml_dict)
@classmethod
def from_config_dict(cls, config_dict, config_definition_file=None):
"""
Validating a config file.
Parameters
----------
config_dict : ~dict
dictionary of a raw unvalidated config file
Returns
-------
`tardis.config_reader.Configuration`
"""
if config_definition_file is None:
config_definition_file = default_config_definition_file
config_definition = yaml.load(open(config_definition_file))
return cls(ConfigurationValidator(config_definition,
config_dict).get_config())
marker = object()
def __init__(self, value=None):
if value is None:
pass
elif isinstance(value, dict):
for key in value:
self.__setitem__(key, value[key])
else:
            raise TypeError('expected dict')
def __setitem__(self, key, value):
if isinstance(value, dict) and not isinstance(value,
ConfigurationNameSpace):
value = ConfigurationNameSpace(value)
if key in self and hasattr(self[key], 'unit'):
value = u.Quantity(value, self[key].unit)
dict.__setitem__(self, key, value)
def __getitem__(self, key):
return super(ConfigurationNameSpace, self).__getitem__(key)
def __getattr__(self, item):
if item in self:
return self[item]
else:
            return super(ConfigurationNameSpace, self).__getattribute__(item)
__setattr__ = __setitem__
def __dir__(self):
return self.keys()
def get_config_item(self, config_item_string):
"""
Get configuration items using a string of type 'a.b.param'
Parameters
----------
config_item_string: ~str
string of shape 'section1.sectionb.param1'
"""
config_item_path = config_item_string.split('.')
if len(config_item_path) == 1:
config_item = config_item_path[0]
if config_item.startswith('item'):
return self[config_item_path[0]]
else:
return self[config_item]
elif len(config_item_path) == 2 and\
config_item_path[1].startswith('item'):
return self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))]
else:
return self[config_item_path[0]].get_config_item(
'.'.join(config_item_path[1:]))
def set_config_item(self, config_item_string, value):
"""
set configuration items using a string of type 'a.b.param'
Parameters
----------
config_item_string: ~str
string of shape 'section1.sectionb.param1'
value:
value to set the parameter with it
"""
config_item_path = config_item_string.split('.')
if len(config_item_path) == 1:
self[config_item_path[0]] = value
elif len(config_item_path) == 2 and \
config_item_path[1].startswith('item'):
current_value = self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))]
if hasattr(current_value, 'unit'):
self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))] =\
u.Quantity(value, current_value.unit)
else:
self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))] = value
else:
self[config_item_path[0]].set_config_item(
'.'.join(config_item_path[1:]), value)
def deepcopy(self):
return ConfigurationNameSpace(copy.deepcopy(dict(self)))
class Configuration(ConfigurationNameSpace):
"""
Tardis configuration class
"""
@classmethod
def from_yaml(cls, fname, test_parser=False):
try:
yaml_dict = yaml.load(open(fname))
except IOError as e:
logger.critical('No config file named: %s', fname)
raise e
tardis_config_version = yaml_dict.get('tardis_config_version', None)
if tardis_config_version != 'v1.0':
raise ConfigurationError('Currently only tardis_config_version v1.0 supported')
return cls.from_config_dict(yaml_dict, test_parser=test_parser)
@classmethod
def from_config_dict(cls, config_dict, atom_data=None, test_parser=False,
config_definition_file=None, validate=True):
"""
Validating and subsequently parsing a config file.
Parameters
----------
config_dict : ~dict
dictionary of a raw unvalidated config file
atom_data: ~tardis.atomic.AtomData
atom data object. if `None` will be tried to be read from
atom data file path in the config_dict [default=None]
test_parser: ~bool
switch on to ignore a working atom_data, mainly useful for
testing this reader
config_definition_file: ~str
path to config definition file, if `None` will be set to the default
in the `data` directory that ships with TARDIS
validate: ~bool
Turn validation on or off.
Returns
-------
`tardis.config_reader.Configuration`
"""
if config_definition_file is None:
config_definition_file = default_config_definition_file
config_definition = yaml.load(open(config_definition_file))
if validate:
validated_config_dict = ConfigurationValidator(config_definition,
config_dict).get_config()
else:
validated_config_dict = config_dict
#First let's see if we can find an atom_db anywhere:
if test_parser:
atom_data = None
elif 'atom_data' in validated_config_dict.keys():
atom_data_fname = validated_config_dict['atom_data']
validated_config_dict['atom_data_fname'] = atom_data_fname
else:
raise ConfigurationError('No atom_data key found in config or command line')
if atom_data is None and not test_parser:
logger.info('Reading Atomic Data from %s', atom_data_fname)
atom_data = atomic.AtomData.from_hdf5(atom_data_fname)
else:
atom_data = atom_data
#Parsing supernova dictionary
validated_config_dict['supernova']['luminosity_nu_start'] = \
validated_config_dict['supernova']['luminosity_wavelength_end'].to(
u.Hz, u.spectral())
try:
validated_config_dict['supernova']['luminosity_nu_end'] = \
(validated_config_dict['supernova']
['luminosity_wavelength_start'].to(u.Hz, u.spectral()))
except ZeroDivisionError:
validated_config_dict['supernova']['luminosity_nu_end'] = (
np.inf * u.Hz)
validated_config_dict['supernova']['time_explosion'] = (
validated_config_dict['supernova']['time_explosion'].cgs)
validated_config_dict['supernova']['luminosity_requested'] = (
validated_config_dict['supernova']['luminosity_requested'].cgs)
#Parsing the model section
model_section = validated_config_dict['model']
v_inner = None
v_outer = None
mean_densities = None
abundances = None
structure_section = model_section['structure']
if structure_section['type'] == 'specific':
start, stop, num = model_section['structure']['velocity']
num += 1
velocities = np.linspace(start, stop, num)
v_inner, v_outer = velocities[:-1], velocities[1:]
mean_densities = parse_density_section(
model_section['structure']['density'], v_inner, v_outer,
validated_config_dict['supernova']['time_explosion']).cgs
elif structure_section['type'] == 'file':
v_inner, v_outer, mean_densities, inner_boundary_index, \
outer_boundary_index = read_density_file(
structure_section['filename'], structure_section['filetype'],
validated_config_dict['supernova']['time_explosion'],
structure_section['v_inner_boundary'],
structure_section['v_outer_boundary'])
r_inner = validated_config_dict['supernova']['time_explosion'] * v_inner
r_outer = validated_config_dict['supernova']['time_explosion'] * v_outer
r_middle = 0.5 * (r_inner + r_outer)
structure_validated_config_dict = {}
structure_section['v_inner'] = v_inner.cgs
structure_section['v_outer'] = v_outer.cgs
structure_section['mean_densities'] = mean_densities.cgs
no_of_shells = len(v_inner)
structure_section['no_of_shells'] = no_of_shells
structure_section['r_inner'] = r_inner.cgs
structure_section['r_outer'] = r_outer.cgs
structure_section['r_middle'] = r_middle.cgs
structure_section['volumes'] = ((4. / 3) * np.pi * \
(r_outer ** 3 -
r_inner ** 3)).cgs
#### TODO the following is legacy code and should be removed
validated_config_dict['structure'] = \
validated_config_dict['model']['structure']
# ^^^^^^^^^^^^^^^^
abundances_section = model_section['abundances']
if abundances_section['type'] == 'uniform':
abundances = pd.DataFrame(columns=np.arange(no_of_shells),
index=pd.Index(np.arange(1, 120), name='atomic_number'), dtype=np.float64)
for element_symbol_string in abundances_section:
if element_symbol_string == 'type': continue
z = element_symbol2atomic_number(element_symbol_string)
abundances.ix[z] = float(abundances_section[element_symbol_string])
elif abundances_section['type'] == 'file':
index, abundances = read_abundances_file(abundances_section['filename'], abundances_section['filetype'],
inner_boundary_index, outer_boundary_index)
if len(index) != no_of_shells:
                raise ConfigurationError('The abundance file specified does not have the same number of cells '
                                         'as the specified density profile')
abundances = abundances.replace(np.nan, 0.0)
abundances = abundances[abundances.sum(axis=1) > 0]
norm_factor = abundances.sum(axis=0)
if np.any(np.abs(norm_factor - 1) > 1e-12):
logger.warning("Abundances have not been normalized to 1. - normalizing")
abundances /= norm_factor
validated_config_dict['abundances'] = abundances
########### DOING PLASMA SECTION ###############
plasma_section = validated_config_dict['plasma']
if plasma_section['initial_t_inner'] < 0.0 * u.K:
luminosity_requested = validated_config_dict['supernova']['luminosity_requested']
plasma_section['t_inner'] = ((luminosity_requested /
(4 * np.pi * r_inner[0] ** 2 *
constants.sigma_sb)) ** .25).to('K')
logger.info('"initial_t_inner" is not specified in the plasma '
'section - initializing to %s with given luminosity',
plasma_section['t_inner'])
else:
plasma_section['t_inner'] = plasma_section['initial_t_inner']
plasma_section['t_rads'] = np.ones(no_of_shells) * \
plasma_section['initial_t_rad']
if plasma_section['disable_electron_scattering'] is False:
logger.debug("Electron scattering switched on")
validated_config_dict['montecarlo']['sigma_thomson'] = 6.652486e-25 / (u.cm ** 2)
else:
logger.warn('Disabling electron scattering - this is not physical')
validated_config_dict['montecarlo']['sigma_thomson'] = 1e-200 / (u.cm ** 2)
##### NLTE subsection of Plasma start
nlte_validated_config_dict = {}
nlte_species = []
nlte_section = plasma_section['nlte']
nlte_species_list = nlte_section.pop('species')
for species_string in nlte_species_list:
nlte_species.append(species_string_to_tuple(species_string))
nlte_validated_config_dict['species'] = nlte_species
nlte_validated_config_dict['species_string'] = nlte_species_list
nlte_validated_config_dict.update(nlte_section)
if 'coronal_approximation' not in nlte_section:
logger.debug('NLTE "coronal_approximation" not specified in NLTE section - defaulting to False')
nlte_validated_config_dict['coronal_approximation'] = False
if 'classical_nebular' not in nlte_section:
logger.debug('NLTE "classical_nebular" not specified in NLTE section - defaulting to False')
nlte_validated_config_dict['classical_nebular'] = False
elif nlte_section: #checks that the dictionary is not empty
logger.warn('No "species" given - ignoring other NLTE options given:\n%s',
pp.pformat(nlte_section))
if not nlte_validated_config_dict:
nlte_validated_config_dict['species'] = []
plasma_section['nlte'] = nlte_validated_config_dict
#^^^^^^^^^^^^^^ End of Plasma Section
##### Monte Carlo Section
montecarlo_section = validated_config_dict['montecarlo']
if montecarlo_section['last_no_of_packets'] < 0:
montecarlo_section['last_no_of_packets'] = \
montecarlo_section['no_of_packets']
default_convergence_section = {'type': 'damped',
'lock_t_inner_cycles': 1,
't_inner_update_exponent': -0.5,
'damping_constant': 0.5}
if montecarlo_section['convergence_strategy'] is None:
logger.warning('No convergence criteria selected - '
'just damping by 0.5 for w, t_rad and t_inner')
montecarlo_section['convergence_strategy'] = (
parse_convergence_section(default_convergence_section))
else:
montecarlo_section['convergence_strategy'] = (
parse_convergence_section(
montecarlo_section['convergence_strategy']))
black_body_section = montecarlo_section['black_body_sampling']
montecarlo_section['black_body_sampling'] = {}
montecarlo_section['black_body_sampling']['start'] = \
black_body_section[0]
montecarlo_section['black_body_sampling']['end'] = \
black_body_section[1]
montecarlo_section['black_body_sampling']['samples'] = \
black_body_section[2]
###### END of convergence section reading
validated_config_dict['spectrum'] = parse_spectrum_list2dict(
validated_config_dict['spectrum'])
return cls(validated_config_dict, atom_data)
def __init__(self, config_dict, atom_data):
super(Configuration, self).__init__(config_dict)
self.atom_data = atom_data
selected_atomic_numbers = self.abundances.index
if atom_data is not None:
self.number_densities = (self.abundances * self.structure.mean_densities.to('g/cm^3').value)
self.number_densities = self.number_densities.div(self.atom_data.atom_data.mass.ix[selected_atomic_numbers],
axis=0)
else:
logger.critical('atom_data is None, only sensible for testing the parser')
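# Minimal usage sketch (the YAML file name is hypothetical; a valid TARDIS
# configuration with ``tardis_config_version: v1.0`` and an ``atom_data`` entry
# is assumed):
#     from tardis.config_reader import Configuration
#     config = Configuration.from_yaml('tardis_example.yml')
#     print config.supernova.luminosity_requested
#     print config.structure.no_of_shells
# Attribute-style access (config.a.b.c) is provided by ConfigurationNameSpace,
# and individual values can also be read with config.get_config_item('a.b.c').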
| bsd-3-clause |
vitor-alves/pixel-canvas-bot | packages/chardet/langhungarianmodel.py | 269 | 12592 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences: 5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
Latin2HungarianModel = {
'char_to_order_map': Latin2_HungarianCharToOrderMap,
'precedence_matrix': HungarianLangModel,
'typical_positive_ratio': 0.947368,
'keep_english_letter': True,
'charset_name': "ISO-8859-2",
'language': 'Hungarian',
}
Win1250HungarianModel = {
'char_to_order_map': win1250HungarianCharToOrderMap,
'precedence_matrix': HungarianLangModel,
'typical_positive_ratio': 0.947368,
'keep_english_letter': True,
'charset_name': "windows-1250",
'language': 'Hungarian',
}
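# Usage sketch: these model dictionaries are not used directly by callers; they
# are fed to chardet's single-byte charset probers during detection. From the
# public API (the byte string below is illustrative Hungarian text encoded in a
# single-byte code page):
#     import chardet
#     result = chardet.detect('valami magyar sz\xf6veg')
#     # result is a dict with 'encoding', 'confidence' and 'language' keys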
| gpl-3.0 |
ShownX/incubator-mxnet | example/rcnn/rcnn/io/rpn.py | 34 | 10297 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
RPN:
data =
{'data': [num_images, c, h, w],
'im_info': [num_images, 4] (optional)}
label =
{'gt_boxes': [num_boxes, 5] (optional),
'label': [batch_size, 1] <- [batch_size, num_anchors, feat_height, feat_width],
'bbox_target': [batch_size, num_anchors, feat_height, feat_width],
'bbox_weight': [batch_size, num_anchors, feat_height, feat_width]}
"""
import logging
import numpy as np
import numpy.random as npr
from ..logger import logger
from ..config import config
from .image import get_image, tensor_vstack
from ..processing.generate_anchor import generate_anchors
from ..processing.bbox_transform import bbox_overlaps, bbox_transform
def get_rpn_testbatch(roidb):
"""
return a dict of testbatch
:param roidb: ['image', 'flipped']
:return: data, label, im_info
"""
assert len(roidb) == 1, 'Single batch only'
imgs, roidb = get_image(roidb)
im_array = imgs[0]
im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
data = {'data': im_array,
'im_info': im_info}
label = {}
return data, label, im_info
def get_rpn_batch(roidb):
"""
prototype for rpn batch: data, im_info, gt_boxes
:param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
:return: data, label
"""
assert len(roidb) == 1, 'Single batch only'
imgs, roidb = get_image(roidb)
im_array = imgs[0]
im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
# gt boxes: (x1, y1, x2, y2, cls)
if roidb[0]['gt_classes'].size > 0:
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
else:
gt_boxes = np.empty((0, 5), dtype=np.float32)
data = {'data': im_array,
'im_info': im_info}
label = {'gt_boxes': gt_boxes}
return data, label
def assign_anchor(feat_shape, gt_boxes, im_info, feat_stride=16,
scales=(8, 16, 32), ratios=(0.5, 1, 2), allowed_border=0):
"""
assign ground truth boxes to anchor positions
:param feat_shape: infer output shape
:param gt_boxes: assign ground truth
:param im_info: filter out anchors overlapped with edges
:param feat_stride: anchor position step
:param scales: used to generate anchors, affects num_anchors (per location)
:param ratios: aspect ratios of generated anchors
:param allowed_border: filter out anchors with edge overlap > allowed_border
:return: dict of label
'label': of shape (batch_size, 1) <- (batch_size, num_anchors, feat_height, feat_width)
'bbox_target': of shape (batch_size, num_anchors * 4, feat_height, feat_width)
'bbox_inside_weight': *todo* mark the assigned anchors
'bbox_outside_weight': used to normalize the bbox_loss, all weights sums to RPN_POSITIVE_WEIGHT
"""
def _unmap(data, count, inds, fill=0):
"""" unmap a subset inds of data into original data of size count """
if len(data.shape) == 1:
ret = np.empty((count,), dtype=np.float32)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count,) + data.shape[1:], dtype=np.float32)
ret.fill(fill)
ret[inds, :] = data
return ret
im_info = im_info[0]
scales = np.array(scales, dtype=np.float32)
base_anchors = generate_anchors(base_size=feat_stride, ratios=list(ratios), scales=scales)
num_anchors = base_anchors.shape[0]
feat_height, feat_width = feat_shape[-2:]
logger.debug('anchors: %s' % base_anchors)
logger.debug('anchor shapes: %s' % np.hstack((base_anchors[:, 2::4] - base_anchors[:, 0::4],
base_anchors[:, 3::4] - base_anchors[:, 1::4])))
logger.debug('im_info %s' % im_info)
logger.debug('height %d width %d' % (feat_height, feat_width))
logger.debug('gt_boxes shape %s' % np.array(gt_boxes.shape))
logger.debug('gt_boxes %s' % gt_boxes)
# 1. generate proposals from bbox deltas and shifted anchors
shift_x = np.arange(0, feat_width) * feat_stride
shift_y = np.arange(0, feat_height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
all_anchors = base_anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
all_anchors = all_anchors.reshape((K * A, 4))
total_anchors = int(K * A)
# only keep anchors inside the image
inds_inside = np.where((all_anchors[:, 0] >= -allowed_border) &
(all_anchors[:, 1] >= -allowed_border) &
(all_anchors[:, 2] < im_info[1] + allowed_border) &
(all_anchors[:, 3] < im_info[0] + allowed_border))[0]
logger.debug('total_anchors %d' % total_anchors)
logger.debug('inds_inside %d' % len(inds_inside))
# keep only inside anchors
anchors = all_anchors[inds_inside, :]
logger.debug('anchors shape %s' % np.array(anchors.shape))
# label: 1 is positive, 0 is negative, -1 is dont care
labels = np.empty((len(inds_inside),), dtype=np.float32)
labels.fill(-1)
if gt_boxes.size > 0:
# overlap between the anchors and the gt boxes
# overlaps (ex, gt)
overlaps = bbox_overlaps(anchors.astype(np.float), gt_boxes.astype(np.float))
argmax_overlaps = overlaps.argmax(axis=1)
max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
gt_argmax_overlaps = overlaps.argmax(axis=0)
gt_max_overlaps = overlaps[gt_argmax_overlaps, np.arange(overlaps.shape[1])]
gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
if not config.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels first so that positive labels can clobber them
labels[max_overlaps < config.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
# fg label: for each gt, anchor with highest overlap
labels[gt_argmax_overlaps] = 1
# fg label: above threshold IoU
labels[max_overlaps >= config.TRAIN.RPN_POSITIVE_OVERLAP] = 1
if config.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels last so that negative labels can clobber positives
labels[max_overlaps < config.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
else:
labels[:] = 0
# subsample positive labels if we have too many
num_fg = int(config.TRAIN.RPN_FG_FRACTION * config.TRAIN.RPN_BATCH_SIZE)
fg_inds = np.where(labels == 1)[0]
if len(fg_inds) > num_fg:
disable_inds = npr.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)
if logger.level == logging.DEBUG:
disable_inds = fg_inds[:(len(fg_inds) - num_fg)]
labels[disable_inds] = -1
# subsample negative labels if we have too many
num_bg = config.TRAIN.RPN_BATCH_SIZE - np.sum(labels == 1)
bg_inds = np.where(labels == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = npr.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)
if logger.level == logging.DEBUG:
disable_inds = bg_inds[:(len(bg_inds) - num_bg)]
labels[disable_inds] = -1
bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
if gt_boxes.size > 0:
bbox_targets[:] = bbox_transform(anchors, gt_boxes[argmax_overlaps, :4])
bbox_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
bbox_weights[labels == 1, :] = np.array(config.TRAIN.RPN_BBOX_WEIGHTS)
if logger.level == logging.DEBUG:
_sums = bbox_targets[labels == 1, :].sum(axis=0)
_squared_sums = (bbox_targets[labels == 1, :] ** 2).sum(axis=0)
_counts = np.sum(labels == 1)
means = _sums / (_counts + 1e-14)
stds = np.sqrt(_squared_sums / _counts - means ** 2)
logger.debug('means %s' % means)
logger.debug('stdevs %s' % stds)
# map up to original set of anchors
labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
bbox_weights = _unmap(bbox_weights, total_anchors, inds_inside, fill=0)
if logger.level == logging.DEBUG:
if gt_boxes.size > 0:
logger.debug('rpn: max max_overlaps %f' % np.max(max_overlaps))
logger.debug('rpn: num_positives %f' % np.sum(labels == 1))
logger.debug('rpn: num_negatives %f' % np.sum(labels == 0))
_fg_sum = np.sum(labels == 1)
_bg_sum = np.sum(labels == 0)
_count = 1
logger.debug('rpn: num_positive avg %f' % (_fg_sum / _count))
logger.debug('rpn: num_negative avg %f' % (_bg_sum / _count))
labels = labels.reshape((1, feat_height, feat_width, A)).transpose(0, 3, 1, 2)
labels = labels.reshape((1, A * feat_height * feat_width))
bbox_targets = bbox_targets.reshape((1, feat_height, feat_width, A * 4)).transpose(0, 3, 1, 2)
bbox_weights = bbox_weights.reshape((1, feat_height, feat_width, A * 4)).transpose((0, 3, 1, 2))
label = {'label': labels,
'bbox_target': bbox_targets,
'bbox_weight': bbox_weights}
return label
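# Usage sketch (shapes and boxes are illustrative; the global ``config``
# defaults of this package are assumed):
#     feat_shape = (1, 512, 38, 50)                         # N, C, H, W of the conv feature map
#     im_info = np.array([[600., 800., 1.6]])               # height, width, scale
#     gt_boxes = np.array([[100., 100., 300., 400., 1.]])   # x1, y1, x2, y2, class
#     label = assign_anchor(feat_shape, gt_boxes, im_info, feat_stride=16)
#     # label['label'].shape        -> (1, 9 * 38 * 50) for the default 9 anchors
#     # label['bbox_target'].shape  -> (1, 9 * 4, 38, 50)
#     # label['bbox_weight'].shape  -> (1, 9 * 4, 38, 50)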
| apache-2.0 |
zyxcambridge/RecordExistence | code/web/node_modules/node-gyp/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalizes and sorts an xml. It also fetches all the properties
inside linked vsprops and includes them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple(object):
"""Compare function between 2 tuple."""
def __call__(self, x, y):
return cmp(x[0], y[0])
class CmpNode(object):
"""Compare function between 2 xml nodes."""
def __call__(self, x, y):
def get_string(node):
node_string = "node"
node_string += node.nodeName
if node.nodeValue:
node_string += node.nodeValue
if node.attributes:
# We first sort by name, if present.
node_string += node.getAttribute("Name")
all_nodes = []
for (name, value) in node.attributes.items():
all_nodes.append((name, value))
all_nodes.sort(CmpTuple())
for (name, value) in all_nodes:
node_string += name
node_string += value
return node_string
return cmp(get_string(x), get_string(y))
def PrettyPrintNode(node, indent=0):
if node.nodeType == Node.TEXT_NODE:
if node.data.strip():
print '%s%s' % (' '*indent, node.data.strip())
return
if node.childNodes:
node.normalize()
# Get the number of attributes
attr_count = 0
if node.attributes:
attr_count = node.attributes.length
# Print the main tag
if attr_count == 0:
print '%s<%s>' % (' '*indent, node.nodeName)
else:
print '%s<%s' % (' '*indent, node.nodeName)
all_attributes = []
for (name, value) in node.attributes.items():
all_attributes.append((name, value))
all_attributes.sort(CmpTuple())
for (name, value) in all_attributes:
print '%s %s="%s"' % (' '*indent, name, value)
print '%s>' % (' '*indent)
if node.nodeValue:
print '%s %s' % (' '*indent, node.nodeValue)
for sub_node in node.childNodes:
PrettyPrintNode(sub_node, indent=indent+2)
print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
"""Returns a list of all the node and sub nodes."""
node_list = []
if (node.attributes and
node.getAttribute('Name') == '_excluded_files'):
# We don't add the "_excluded_files" filter.
return []
for current in node.childNodes:
if current.nodeName == 'Filter':
node_list.extend(FlattenFilter(current))
else:
node_list.append(current)
return node_list
def FixFilenames(filenames, current_directory):
new_list = []
for filename in filenames:
if filename:
for key in REPLACEMENTS:
filename = filename.replace(key, REPLACEMENTS[key])
os.chdir(current_directory)
filename = filename.strip('"\' ')
if filename.startswith('$'):
new_list.append(filename)
else:
new_list.append(os.path.abspath(filename))
return new_list
def AbsoluteNode(node):
"""Makes all the properties we know about in this node absolute."""
if node.attributes:
for (name, value) in node.attributes.items():
if name in ['InheritedPropertySheets', 'RelativePath',
'AdditionalIncludeDirectories',
'IntermediateDirectory', 'OutputDirectory',
'AdditionalLibraryDirectories']:
# We want to fix up these paths
path_list = value.split(';')
new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
node.setAttribute(name, ';'.join(new_list))
if not value:
node.removeAttribute(name)
def CleanupVcproj(node):
"""For each sub node, we call recursively this function."""
for sub_node in node.childNodes:
AbsoluteNode(sub_node)
CleanupVcproj(sub_node)
  # Normalize the node, and remove all extraneous whitespace.
for sub_node in node.childNodes:
if sub_node.nodeType == Node.TEXT_NODE:
sub_node.data = sub_node.data.replace("\r", "")
sub_node.data = sub_node.data.replace("\n", "")
sub_node.data = sub_node.data.rstrip()
# Fix all the semicolon separated attributes to be sorted, and we also
# remove the dups.
if node.attributes:
for (name, value) in node.attributes.items():
sorted_list = sorted(value.split(';'))
unique_list = []
for i in sorted_list:
if not unique_list.count(i):
unique_list.append(i)
node.setAttribute(name, ';'.join(unique_list))
if not value:
node.removeAttribute(name)
if node.childNodes:
node.normalize()
# For each node, take a copy, and remove it from the list.
node_array = []
while node.childNodes and node.childNodes[0]:
# Take a copy of the node and remove it from the list.
current = node.childNodes[0]
node.removeChild(current)
# If the child is a filter, we want to append all its children
# to this same list.
if current.nodeName == 'Filter':
node_array.extend(FlattenFilter(current))
else:
node_array.append(current)
# Sort the list.
node_array.sort(CmpNode())
# Insert the nodes in the correct order.
for new_node in node_array:
# But don't append empty tool node.
if new_node.nodeName == 'Tool':
if new_node.attributes and new_node.attributes.length == 1:
# This one was empty.
continue
if new_node.nodeName == 'UserMacro':
continue
node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
#TODO(nsylvain): Find a better way to navigate the xml.
nodes = []
for node in vcproj.childNodes:
if node.nodeName == "Configurations":
for sub_node in node.childNodes:
if sub_node.nodeName == "Configuration":
nodes.append(sub_node)
return nodes
def GetChildrenVsprops(filename):
dom = parse(filename)
if dom.documentElement.attributes:
vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
return []
def SeekToNode(node1, child2):
# A text node does not have properties.
if child2.nodeType == Node.TEXT_NODE:
return None
# Get the name of the current node.
current_name = child2.getAttribute("Name")
if not current_name:
# There is no name. We don't know how to merge.
return None
# Look through all the nodes to find a match.
for sub_node in node1.childNodes:
if sub_node.nodeName == child2.nodeName:
name = sub_node.getAttribute("Name")
if name == current_name:
return sub_node
# No match. We give up.
return None
def MergeAttributes(node1, node2):
# No attributes to merge?
if not node2.attributes:
return
for (name, value2) in node2.attributes.items():
# Don't merge the 'Name' attribute.
if name == 'Name':
continue
value1 = node1.getAttribute(name)
if value1:
      # The attribute exists in the main node. If it's equal, we leave it
      # untouched, otherwise we concatenate it.
if value1 != value2:
node1.setAttribute(name, ';'.join([value1, value2]))
else:
      # The attribute does not exist in the main node. We append this one.
node1.setAttribute(name, value2)
    # If the attribute is a property sheet attribute, we remove it, since
    # it is useless.
if name == 'InheritedPropertySheets':
node1.removeAttribute(name)
def MergeProperties(node1, node2):
MergeAttributes(node1, node2)
for child2 in node2.childNodes:
child1 = SeekToNode(node1, child2)
if child1:
MergeProperties(child1, child2)
else:
node1.appendChild(child2.cloneNode(True))
def main(argv):
"""Main function of this vcproj prettifier."""
global ARGUMENTS
ARGUMENTS = argv
  # Check that we have at least 1 parameter (the vcproj path).
if len(argv) < 2:
print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
'[key2=value2]' % argv[0])
return 1
# Parse the keys
for i in range(2, len(argv)):
(key, value) = argv[i].split('=')
REPLACEMENTS[key] = value
# Open the vcproj and parse the xml.
dom = parse(argv[1])
  # The first thing we need to do is find the Configuration nodes and merge
  # them with the vsprops they include.
for configuration_node in GetConfiguationNodes(dom.documentElement):
# Get the property sheets associated with this configuration.
vsprops = configuration_node.getAttribute('InheritedPropertySheets')
# Fix the filenames to be absolute.
vsprops_list = FixFilenames(vsprops.strip().split(';'),
os.path.dirname(argv[1]))
# Extend the list of vsprops with all vsprops contained in the current
# vsprops.
for current_vsprops in vsprops_list:
vsprops_list.extend(GetChildrenVsprops(current_vsprops))
# Now that we have all the vsprops, we need to merge them.
for current_vsprops in vsprops_list:
MergeProperties(configuration_node,
parse(current_vsprops).documentElement)
# Now that everything is merged, we need to cleanup the xml.
CleanupVcproj(dom.documentElement)
  # Finally, we use the pretty xml function to print the vcproj back to the
# user.
#print dom.toprettyxml(newl="\n")
PrettyPrintNode(dom.documentElement)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
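# Usage sketch (paths and macro values are illustrative):
#     python pretty_vcproj.py chrome.vcproj "$(SolutionDir)=c:\src\chrome\" > pretty.vcproj
# Extra key=value arguments populate REPLACEMENTS and are substituted into the
# paths found in the project (and its property sheets) before they are made
# absolute; the prettified, merged xml is written to stdout.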
| mit |
shellderp/sublime-robot-plugin | lib/robot/running/runkwregister.py | 2 | 1734 | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from robot import utils
class _RunKeywordRegister:
def __init__(self):
self._libs = {}
def register_run_keyword(self, libname, keyword, args_to_process=None):
if args_to_process is None:
args_to_process = self._get_args_from_method(keyword)
keyword = keyword.__name__
if libname not in self._libs:
self._libs[libname] = utils.NormalizedDict(ignore=['_'])
self._libs[libname][keyword] = int(args_to_process)
def get_args_to_process(self, libname, kwname):
if libname in self._libs and kwname in self._libs[libname]:
return self._libs[libname][kwname]
return -1
def is_run_keyword(self, libname, kwname):
return self.get_args_to_process(libname, kwname) >= 0
def _get_args_from_method(self, method):
if inspect.ismethod(method):
return method.im_func.func_code.co_argcount -1
elif inspect.isfunction(method):
return method.func_code.co_argcount
raise ValueError("Needs function or method!")
RUN_KW_REGISTER = _RunKeywordRegister()
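if __name__ == '__main__':
    # Demonstration sketch only (not part of Robot Framework): register a
    # hypothetical keyword whose first two arguments should be processed
    # normally while the rest are passed through untouched.
    def run_keyword_if(condition, name, *args):
        pass
    RUN_KW_REGISTER.register_run_keyword('BuiltIn', run_keyword_if)
    # Lookup is case-, space- and underscore-insensitive here, so the pretty
    # keyword name resolves to the registered function name.
    print RUN_KW_REGISTER.get_args_to_process('BuiltIn', 'Run Keyword If')  # 2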
| apache-2.0 |
PokemonGoF/PokemonGo-Bot-Desktop | build/pywin/Lib/hmac.py | 70 | 4588 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from operator import _compare_digest as compare_digest
trans_5C = "".join ([chr (x ^ 0x5C) for x in xrange(256)])
trans_36 = "".join ([chr (x ^ 0x36) for x in xrange(256)])
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
# A unique object passed by HMAC.copy() to the HMAC constructor, in order
# that the latter return very quickly. HMAC("") in contrast is quite
# expensive.
_secret_backdoor_key = []
class HMAC:
"""RFC 2104 HMAC class. Also complies with RFC 4231.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object.
Defaults to hashlib.md5.
"""
if key is _secret_backdoor_key: # cheap
return
if digestmod is None:
import hashlib
digestmod = hashlib.md5
if hasattr(digestmod, '__call__'):
self.digest_cons = digestmod
else:
self.digest_cons = lambda d='': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
if hasattr(self.inner, 'block_size'):
blocksize = self.inner.block_size
if blocksize < 16:
# Very low blocksize, most likely a legacy value like
# Lib/sha.py and Lib/md5.py have.
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
else:
_warnings.warn('No block_size attribute on given digest object; '
'Assuming %d.' % (self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(key.translate(trans_5C))
self.inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = self.__class__(_secret_backdoor_key)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def _current(self):
"""Return a hash object for the current state.
To be used only internally with digest() and hexdigest().
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
h = self._current()
return h.hexdigest()
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
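if __name__ == '__main__':
    # Self-test sketch; the key and message are illustrative, not official test
    # vectors (RFC 4231 provides those for HMAC-SHA256).
    import hashlib
    mac = new('key', 'The quick brown fox jumps over the lazy dog', hashlib.sha256)
    print mac.hexdigest()
    # Use compare_digest() for constant-time comparison when verifying a MAC.
    print compare_digest(mac.hexdigest(), mac.hexdigest())  # True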
| mit |
h3biomed/ansible | lib/ansible/modules/database/postgresql/postgresql_idx.py | 2 | 15109 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Andrey Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_idx
short_description: Create or drop indexes from a PostgreSQL database
description:
- Create or drop indexes from a PostgreSQL database.
- For more information see U(https://www.postgresql.org/docs/current/sql-createindex.html),
U(https://www.postgresql.org/docs/current/sql-dropindex.html).
version_added: '2.8'
options:
idxname:
description:
- Name of the index to create or drop.
type: str
required: true
aliases:
- name
db:
description:
- Name of database to connect to and where the index will be created/dropped.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
schema:
description:
- Name of a database schema where the index will be created.
type: str
state:
description:
- Index state.
- I(state=present) implies the index will be created if it does not exist.
- I(state=absent) implies the index will be dropped if it exists.
type: str
default: present
choices: [ absent, present ]
table:
description:
    - Table to create the index on.
- Mutually exclusive with I(state=absent).
type: str
required: true
columns:
description:
- List of index columns that need to be covered by index.
- Mutually exclusive with I(state=absent).
type: list
aliases:
- column
cond:
description:
- Index conditions.
- Mutually exclusive with I(state=absent).
type: str
idxtype:
description:
- Index type (like btree, gist, gin, etc.).
- Mutually exclusive with I(state=absent).
type: str
aliases:
- type
concurrent:
description:
- Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
    - Note that if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) during the building process.
For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
    - If the building process was interrupted for any reason when I(concurrent=yes), the index becomes invalid.
In this case it should be dropped and created again.
- Mutually exclusive with I(cascade=yes).
type: bool
default: yes
tablespace:
description:
- Set a tablespace for the index.
- Mutually exclusive with I(state=absent).
required: false
type: str
storage_params:
description:
- Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
- Mutually exclusive with I(state=absent).
type: list
cascade:
description:
- Automatically drop objects that depend on the index,
and in turn all objects that depend on those objects U(https://www.postgresql.org/docs/current/sql-dropindex.html).
    - It is used only with I(state=absent).
- Mutually exclusive with I(concurrent=yes)
type: bool
default: no
notes:
- The index building process can affect database performance.
- To avoid table locks on production databases, use I(concurrent=yes) (default behavior).
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements:
- psycopg2
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
postgresql_idx:
db: acme
table: products
columns: id,name
name: test_idx
- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
postgresql_idx:
db: acme
table: products
columns:
- id
- name
idxname: test_idx
tablespace: ssd
storage_params:
- fillfactor=90
- name: Create gist index test_gist_idx concurrently on column geo_data of table map
postgresql_idx:
db: somedb
table: map
idxtype: gist
columns: geo_data
idxname: test_gist_idx
# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops
- name: Create gin index gin0_idx not concurrently on column comment of table test
postgresql_idx:
idxname: gin0_idx
table: test
columns: comment gin_trgm_ops
concurrent: no
idxtype: gin
- name: Drop btree test_idx concurrently
postgresql_idx:
db: mydb
idxname: test_idx
state: absent
- name: Drop test_idx cascade
postgresql_idx:
db: mydb
idxname: test_idx
state: absent
cascade: yes
concurrent: no
- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
postgresql_idx:
db: mydb
table: test
columns: id,comment
idxname: test_idx
cond: id > 1
'''
RETURN = r'''
name:
description: Index name.
returned: always
type: str
sample: 'foo_idx'
state:
description: Index state.
returned: always
type: str
sample: 'present'
schema:
description: Schema where index exists.
returned: always
type: str
sample: 'public'
tablespace:
description: Tablespace where index exists.
returned: always
type: str
sample: 'ssd'
query:
description: Query that was tried to be executed.
returned: always
type: str
sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
storage_params:
description: Index storage parameters.
returned: always
type: list
sample: [ "fillfactor=90" ]
valid:
description: Index validity.
returned: always
type: bool
sample: true
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError
from ansible.module_utils.postgres import connect_to_db, get_conn_params, postgres_common_argument_spec
from ansible.module_utils._text import to_native
VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
# ===========================================
# PostgreSQL module specific support methods.
#
class Index(object):
def __init__(self, module, cursor, schema, name):
self.name = name
if schema:
self.schema = schema
else:
self.schema = 'public'
self.module = module
self.cursor = cursor
self.info = {
'name': self.name,
'state': 'absent',
'schema': '',
'tblname': '',
'tblspace': '',
'valid': True,
'storage_params': [],
}
self.exists = False
self.__exists_in_db()
self.executed_query = ''
def get_info(self):
"""
        Getter to refresh and return index info
"""
self.__exists_in_db()
return self.info
def __exists_in_db(self):
"""
Check index and collect info
"""
query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
"pi.indisvalid, c.reloptions "
"FROM pg_catalog.pg_indexes AS i "
"JOIN pg_catalog.pg_class AS c "
"ON i.indexname = c.relname "
"JOIN pg_catalog.pg_index AS pi "
"ON c.oid = pi.indexrelid "
"WHERE i.indexname = '%s'" % self.name)
res = self.__exec_sql(query)
if res:
self.exists = True
self.info = dict(
name=self.name,
state='present',
schema=res[0][0],
tblname=res[0][1],
tblspace=res[0][2] if res[0][2] else '',
valid=res[0][3],
storage_params=res[0][4] if res[0][4] else [],
)
return True
else:
self.exists = False
return False
def create(self, tblname, idxtype, columns, cond, tblspace, storage_params, concurrent=True):
"""
Create PostgreSQL index.
"""
# To change existing index we should write
# 'postgresql_alter_table' standalone module.
if self.exists:
return False
changed = False
if idxtype is None:
idxtype = "BTREE"
query = 'CREATE INDEX'
if concurrent:
query += ' CONCURRENTLY'
query += ' %s' % self.name
if self.schema:
query += ' ON %s.%s ' % (self.schema, tblname)
else:
            query += ' ON public.%s ' % tblname
query += 'USING %s (%s)' % (idxtype, columns)
if storage_params:
query += ' WITH (%s)' % storage_params
if tblspace:
query += ' TABLESPACE %s' % tblspace
if cond:
query += ' WHERE %s' % cond
self.executed_query = query
if self.__exec_sql(query, ddl=True):
return True
return False
def drop(self, schema, cascade=False, concurrent=True):
"""
Drop PostgreSQL index.
"""
changed = False
if not self.exists:
return False
query = 'DROP INDEX'
if concurrent:
query += ' CONCURRENTLY'
if not schema:
query += ' public.%s' % self.name
else:
query += ' %s.%s' % (schema, self.name)
if cascade:
query += ' CASCADE'
self.executed_query = query
if self.__exec_sql(query, ddl=True):
return True
return False
def __exec_sql(self, query, ddl=False):
try:
self.cursor.execute(query)
if not ddl:
res = self.cursor.fetchall()
return res
return True
except SQLParseError as e:
self.module.fail_json(msg=to_native(e))
except Exception as e:
self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
return False
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
idxname=dict(type='str', required=True, aliases=['name']),
db=dict(type='str', aliases=['login_db']),
state=dict(type='str', default='present', choices=['absent', 'present']),
concurrent=dict(type='bool', default=True),
table=dict(type='str'),
idxtype=dict(type='str', aliases=['type']),
columns=dict(type='list', aliases=['column']),
cond=dict(type='str'),
session_role=dict(type='str'),
tablespace=dict(type='str'),
storage_params=dict(type='list'),
cascade=dict(type='bool', default=False),
schema=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
idxname = module.params["idxname"]
state = module.params["state"]
concurrent = module.params["concurrent"]
table = module.params["table"]
idxtype = module.params["idxtype"]
columns = module.params["columns"]
cond = module.params["cond"]
tablespace = module.params["tablespace"]
storage_params = module.params["storage_params"]
cascade = module.params["cascade"]
schema = module.params["schema"]
if concurrent and cascade:
module.fail_json(msg="Cuncurrent mode and cascade parameters are mutually exclusive")
if state == 'present':
if not table:
module.fail_json(msg="Table must be specified")
if not columns:
module.fail_json(msg="At least one column must be specified")
else:
if table or columns or cond or idxtype or tablespace:
module.fail_json(msg="Index %s is going to be removed, so it does not "
"make sense to pass a table name, columns, conditions, "
"index type, or tablespace" % idxname)
if cascade and state != 'absent':
module.fail_json(msg="cascade parameter used only with state=absent")
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Set defaults:
changed = False
# Do job:
index = Index(module, cursor, schema, idxname)
kw = index.get_info()
kw['query'] = ''
#
# check_mode start
if module.check_mode:
if state == 'present' and index.exists:
kw['changed'] = False
module.exit_json(**kw)
elif state == 'present' and not index.exists:
kw['changed'] = True
module.exit_json(**kw)
elif state == 'absent' and not index.exists:
kw['changed'] = False
module.exit_json(**kw)
elif state == 'absent' and index.exists:
kw['changed'] = True
module.exit_json(**kw)
# check_mode end
#
if state == "present":
if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))
columns = ','.join(columns)
if storage_params:
storage_params = ','.join(storage_params)
changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent)
if changed:
kw = index.get_info()
kw['state'] = 'present'
kw['query'] = index.executed_query
else:
changed = index.drop(schema, cascade, concurrent)
if changed:
kw['state'] = 'absent'
kw['query'] = index.executed_query
if not kw['valid']:
db_connection.rollback()
module.warn("Index %s is invalid! ROLLBACK" % idxname)
if not concurrent:
db_connection.commit()
kw['changed'] = changed
db_connection.close()
module.exit_json(**kw)
if __name__ == '__main__':
main()
| gpl-3.0 |
Parrot-Developers/bybop | src/Bybop_NetworkAL.py | 2 | 4049 | import socket
import struct
import threading
class DataType:
ACK=1
DATA=2
DATA_LOW_LATENCY=3
DATA_WITH_ACK=4
class NetworkAL(object):
"""
Alternate implementation of the ARNetworkAL protocol, for Wifi devices.
    This implementation is fully compliant with the protocol and has no major
    limitations.
This implementation uses a thread to do background reads from the socket, and
send data to the application through a listener. This listener must implement a
'data_received' function, which will receive the following arguments:
- type : The type of data received (ack, data, low latency, data with ack)
- buf : The buffer on which this data was retrieved
- seq : The sequence number of the data
- recv_data : The actual data, as a packed string (use the struct module to unpack)
And a 'did_disconnect' function, without arguments, which will be called if the product
does not send any data on the network (probably because we lost the network link, or
because the product has run out of battery)
"""
def __init__(self, ip, c2d_port, d2c_port, listener):
"""
Create and start a new instance of ARNetworkAL.
Arguments:
- ip (string) : The device address
        - c2d_port : The remote reading port
- d2c_port : The local reading port
- listener : A listener which will have its data_received function called
when a data is received from the network.
"""
self._ip = ip
self._c2d_port = int(c2d_port)
self._d2c_port = int(d2c_port)
self._listener = listener
self._alive = False
self._running = False
self._thread = None
self.start()
def stop(self):
"""
Stop the current ARNetworkAL instance.
        Once stopped, an instance can be restarted with the start method.
"""
if self._running:
self._alive = False
self._send_sock.close()
def start(self):
"""
Start the current ARNetworkAL instance.
This function has no effect if the instance is already started.
"""
if self._running:
return
self._alive = True
self._send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._recv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._recv_sock.settimeout(5.0)
self._recv_sock.bind(('0.0.0.0', self._d2c_port))
self._thread = threading.Thread(target=self._read_loop)
self._thread.start()
self._running = True
def send_data(self, type, buf, seq, data):
"""
Send the given data to the remote ARNetworkAL.
This function returns a boolean indicating whether the send worked.
        This boolean is not an acknowledgement, just an indicator that the socket
write did not fail.
Arguments:
- type : The type of data (ack, data, low latency, data with ack)
- buf : The target buffer for the data
- seq : The sequence number of the data
        - data : The actual data (usually a string packed with the struct module)
"""
sock_data = struct.pack('<BBBI', type, buf, seq, len(data) + 7)
sock_data += data
try:
self._send_sock.sendto(sock_data, (self._ip, self._c2d_port))
except:
return False
return True
def _read_loop(self):
while self._alive:
try:
sock_data, _ = self._recv_sock.recvfrom(66000)
except Exception as e:
break
the_data = sock_data
while the_data:
(type, buf, seq, size) = struct.unpack('<BBBI', the_data[0:7])
recv_data = the_data[7:size]
self._listener.data_received(type, buf, seq, recv_data)
the_data = the_data[size:]
self._recv_sock.close()
self._listener.did_disconnect()
self._running = False
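# Illustrative wiring of the class above (a sketch; the IP address and port
# numbers are assumptions, real values come from the device discovery step):
#
#   listener = PrintListener()   # any object with data_received/did_disconnect
#   net = NetworkAL('192.168.42.1', c2d_port=54321, d2c_port=43210, listener=listener)
#   net.send_data(DataType.DATA, buf=10, seq=0, data=struct.pack('<B', 4))
#   net.stop()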
| bsd-3-clause |
Serag8/Bachelor | google_appengine/lib/django-1.5/django/contrib/comments/views/moderation.py | 210 | 5204 | from __future__ import absolute_import
from django import template
from django.conf import settings
from django.contrib import comments
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.comments import signals
from django.contrib.comments.views.utils import next_redirect, confirmation_view
from django.shortcuts import get_object_or_404, render_to_response
from django.views.decorators.csrf import csrf_protect
@csrf_protect
@login_required
def flag(request, comment_id, next=None):
"""
Flags a comment. Confirmation on GET, action on POST.
Templates: :template:`comments/flag.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
# Flag on POST
if request.method == 'POST':
perform_flag(request, comment)
return next_redirect(request, fallback=next or 'comments-flag-done',
c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/flag.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
@csrf_protect
@permission_required("comments.can_moderate")
def delete(request, comment_id, next=None):
"""
Deletes a comment. Confirmation on GET, action on POST. Requires the "can
moderate comments" permission.
Templates: :template:`comments/delete.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
# Delete on POST
if request.method == 'POST':
# Flag the comment as deleted instead of actually deleting it.
perform_delete(request, comment)
return next_redirect(request, fallback=next or 'comments-delete-done',
c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/delete.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
@csrf_protect
@permission_required("comments.can_moderate")
def approve(request, comment_id, next=None):
"""
Approve a comment (that is, mark it as public and non-removed). Confirmation
on GET, action on POST. Requires the "can moderate comments" permission.
Templates: :template:`comments/approve.html`,
Context:
comment
the `comments.comment` object for approval
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
    # Approve on POST
if request.method == 'POST':
# Flag the comment as approved.
perform_approve(request, comment)
return next_redirect(request, fallback=next or 'comments-approve-done',
c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/approve.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
# The following functions actually perform the various flag/approve/delete
# actions. They've been broken out into separate functions so that they
# may be called from admin actions.
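# For instance, a hedged sketch of an admin action built on perform_approve
# (the action function below is illustrative and not part of this module):
#
#   def approve_comments(modeladmin, request, queryset):
#       for comment in queryset:
#           perform_approve(request, comment)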
def perform_flag(request, comment):
"""
Actually perform the flagging of a comment from a request.
"""
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment = comment,
user = request.user,
flag = comments.models.CommentFlag.SUGGEST_REMOVAL
)
signals.comment_was_flagged.send(
sender = comment.__class__,
comment = comment,
flag = flag,
created = created,
request = request,
)
def perform_delete(request, comment):
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment = comment,
user = request.user,
flag = comments.models.CommentFlag.MODERATOR_DELETION
)
comment.is_removed = True
comment.save()
signals.comment_was_flagged.send(
sender = comment.__class__,
comment = comment,
flag = flag,
created = created,
request = request,
)
def perform_approve(request, comment):
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment = comment,
user = request.user,
flag = comments.models.CommentFlag.MODERATOR_APPROVAL,
)
comment.is_removed = False
comment.is_public = True
comment.save()
signals.comment_was_flagged.send(
sender = comment.__class__,
comment = comment,
flag = flag,
created = created,
request = request,
)
# Confirmation views.
flag_done = confirmation_view(
template = "comments/flagged.html",
doc = 'Displays a "comment was flagged" success page.'
)
delete_done = confirmation_view(
template = "comments/deleted.html",
doc = 'Displays a "comment was deleted" success page.'
)
approve_done = confirmation_view(
template = "comments/approved.html",
doc = 'Displays a "comment was approved" success page.'
)
| mit |
gavoski/audacity | lib-src/lv2/lv2/plugins/eg01-amp.lv2/waflib/Tools/xlcxx.py | 330 | 1222 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_xlcxx(conf):
cxx=conf.find_program(['xlc++_r','xlc++'],var='CXX')
cxx=conf.cmd_to_list(cxx)
conf.get_xlc_version(cxx)
conf.env.CXX_NAME='xlc++'
conf.env.CXX=cxx
@conf
def xlcxx_common_flags(conf):
v=conf.env
v['CXX_SRC_F']=[]
v['CXX_TGT_F']=['-c','-o']
if not v['LINK_CXX']:v['LINK_CXX']=v['CXX']
v['CXXLNK_SRC_F']=[]
v['CXXLNK_TGT_F']=['-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['RPATH_ST']='-Wl,-rpath,%s'
v['SONAME_ST']=[]
v['SHLIB_MARKER']=[]
v['STLIB_MARKER']=[]
v['LINKFLAGS_cxxprogram']=['-Wl,-brtl']
v['cxxprogram_PATTERN']='%s'
v['CXXFLAGS_cxxshlib']=['-fPIC']
v['LINKFLAGS_cxxshlib']=['-G','-Wl,-brtl,-bexpfull']
v['cxxshlib_PATTERN']='lib%s.so'
v['LINKFLAGS_cxxstlib']=[]
v['cxxstlib_PATTERN']='lib%s.a'
def configure(conf):
conf.find_xlcxx()
conf.find_ar()
conf.xlcxx_common_flags()
conf.cxx_load_tools()
conf.cxx_add_flags()
conf.link_add_flags()
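# This tool is normally loaded from a project wscript rather than imported
# directly, e.g. (an illustrative sketch):
#
#   def configure(conf):
#       conf.load('xlcxx')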
| gpl-2.0 |
amw2104/fireplace | fireplace/cards/classic/paladin.py | 1 | 2853 | from ..utils import *
##
# Hero Powers
# Reinforce (Uther Lightbringer)
class CS2_101:
activate = Summon(CONTROLLER, "CS2_101t")
# Reinforce (Uther Skin 1)
class CS2_101_H1:
activate = CS2_101.activate
##
# Minions
# Guardian of Kings
class CS2_088:
play = Heal(FRIENDLY_HERO, 6)
# Argent Protector
class EX1_362:
play = GiveDivineShield(TARGET)
# Aldor Peacekeeper
class EX1_382:
play = Buff(TARGET, "EX1_382e")
class EX1_382e:
atk = SET(1)
# Tirion Fordring
class EX1_383:
deathrattle = Summon(CONTROLLER, "EX1_383t")
##
# Spells
# Blessing of Might
class CS2_087:
play = Buff(TARGET, "CS2_087e")
CS2_087e = buff(atk=3)
# Holy Light
class CS2_089:
play = Heal(TARGET, 6)
# Blessing of Kings
class CS2_092:
play = Buff(TARGET, "CS2_092e")
CS2_092e = buff(+4, +4)
# Consecration
class CS2_093:
play = Hit(ENEMY_CHARACTERS, 2)
# Hammer of Wrath
class CS2_094:
play = Hit(TARGET, 3), Draw(CONTROLLER)
# Divine Favor
class EX1_349:
play = DrawUntil(CONTROLLER, Count(ENEMY_HAND))
# Lay on Hands
class EX1_354:
play = Heal(TARGET, 8), Draw(CONTROLLER) * 3
# Blessed Champion
class EX1_355:
play = Buff(TARGET, "EX1_355e")
class EX1_355e:
atk = lambda self, i: i * 2
# Humility
class EX1_360:
play = Buff(TARGET, "EX1_360e")
class EX1_360e:
atk = SET(1)
# Blessing of Wisdom
class EX1_363:
play = Buff(TARGET, "EX1_363e")
class EX1_363e:
events = Attack(OWNER).on(Draw(CONTROLLER))
# Blessing of Wisdom (Unused)
class EX1_363e2:
events = Attack(OWNER).on(Draw(OWNER_OPPONENT))
# Holy Wrath
class EX1_365:
play = Draw(CONTROLLER).then(Hit(TARGET, COST(Draw.CARD)))
# Hand of Protection
class EX1_371:
play = GiveDivineShield(TARGET)
# Avenging Wrath
class EX1_384:
def play(self):
count = self.controller.get_spell_damage(8)
yield Hit(RANDOM_ENEMY_CHARACTER, 1) * count
# Equality
class EX1_619:
play = Buff(ALL_MINIONS, "EX1_619e")
class EX1_619e:
max_health = SET(1)
##
# Secrets
# Noble Sacrifice
class EX1_130:
secret = Attack(ENEMY_MINIONS).on(FULL_BOARD | (
Reveal(SELF), Retarget(Attack.ATTACKER, Summon(CONTROLLER, "EX1_130a"))
))
# Eye for an Eye
class EX1_132:
secret = Damage(FRIENDLY_HERO).on(
Reveal(SELF), Hit(ENEMY_HERO, Damage.AMOUNT)
)
# Redemption
class EX1_136:
secret = Death(FRIENDLY + MINION).on(FULL_BOARD | (
Reveal(SELF),
Summon(CONTROLLER, Copy(Death.ENTITY)).then(SetCurrentHealth(Summon.CARD, 1))
))
# Repentance
class EX1_379:
secret = Play(OPPONENT, MINION | HERO).after(
Reveal(SELF), Buff(Play.CARD, "EX1_379e")
)
class EX1_379e:
max_health = SET(1)
##
# Weapons
# Truesilver Champion
class CS2_097:
events = Attack(FRIENDLY_HERO).on(Heal(FRIENDLY_HERO, 2))
# Sword of Justice
class EX1_366:
events = Summon(CONTROLLER, MINION).after(
Buff(Summon.CARD, "EX1_366e"),
Hit(SELF, 1)
)
EX1_366e = buff(+1, +1)
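# A hedged sketch of what an additional card definition in this DSL would look
# like (the class name and effect below are invented for illustration only):
#
#   class XXX_999:
#       play = Heal(FRIENDLY_HERO, 2), Draw(CONTROLLER)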
| agpl-3.0 |
google/contentbox | third_party/django/contrib/formtools/wizard/storage/base.py | 216 | 3949 | from django.core.files.uploadedfile import UploadedFile
from django.utils.datastructures import MultiValueDict
from django.utils.functional import lazy_property
from django.utils import six
from django.contrib.formtools.wizard.storage.exceptions import NoFileStorageConfigured
class BaseStorage(object):
step_key = 'step'
step_data_key = 'step_data'
step_files_key = 'step_files'
extra_data_key = 'extra_data'
def __init__(self, prefix, request=None, file_storage=None):
self.prefix = 'wizard_%s' % prefix
self.request = request
self.file_storage = file_storage
def init_data(self):
self.data = {
self.step_key: None,
self.step_data_key: {},
self.step_files_key: {},
self.extra_data_key: {},
}
def reset(self):
self.init_data()
def _get_current_step(self):
return self.data[self.step_key]
def _set_current_step(self, step):
self.data[self.step_key] = step
current_step = lazy_property(_get_current_step, _set_current_step)
def _get_extra_data(self):
return self.data[self.extra_data_key]
def _set_extra_data(self, extra_data):
self.data[self.extra_data_key] = extra_data
extra_data = lazy_property(_get_extra_data, _set_extra_data)
def get_step_data(self, step):
        # When reading the serialized data, upconvert it to a MultiValueDict,
        # because some serializers (json) don't preserve the type of the object.
values = self.data[self.step_data_key].get(step, None)
if values is not None:
values = MultiValueDict(values)
return values
def set_step_data(self, step, cleaned_data):
# If the value is a MultiValueDict, convert it to a regular dict of the
# underlying contents. Some serializers call the public API on it (as
# opposed to the underlying dict methods), in which case the content
# can be truncated (__getitem__ returns only the first item).
if isinstance(cleaned_data, MultiValueDict):
cleaned_data = dict(cleaned_data.lists())
self.data[self.step_data_key][step] = cleaned_data
@property
def current_step_data(self):
return self.get_step_data(self.current_step)
def get_step_files(self, step):
wizard_files = self.data[self.step_files_key].get(step, {})
if wizard_files and not self.file_storage:
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads.")
files = {}
for field, field_dict in six.iteritems(wizard_files):
field_dict = field_dict.copy()
tmp_name = field_dict.pop('tmp_name')
files[field] = UploadedFile(
file=self.file_storage.open(tmp_name), **field_dict)
return files or None
def set_step_files(self, step, files):
if files and not self.file_storage:
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads.")
if step not in self.data[self.step_files_key]:
self.data[self.step_files_key][step] = {}
for field, field_file in six.iteritems(files or {}):
tmp_filename = self.file_storage.save(field_file.name, field_file)
file_dict = {
'tmp_name': tmp_filename,
'name': field_file.name,
'content_type': field_file.content_type,
'size': field_file.size,
'charset': field_file.charset
}
self.data[self.step_files_key][step][field] = file_dict
@property
def current_step_files(self):
return self.get_step_files(self.current_step)
def update_response(self, response):
pass
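# A hedged sketch of how a concrete backend typically builds on BaseStorage
# (the session-backed wiring below is illustrative, not part of this class):
#
#   class SessionStorage(BaseStorage):
#       def __init__(self, *args, **kwargs):
#           super(SessionStorage, self).__init__(*args, **kwargs)
#           if self.prefix not in self.request.session:
#               self.init_data()
#
#       def _get_data(self):
#           self.request.session.modified = True
#           return self.request.session[self.prefix]
#
#       def _set_data(self, value):
#           self.request.session[self.prefix] = value
#           self.request.session.modified = True
#
#       data = property(_get_data, _set_data)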
| apache-2.0 |
renyi533/tensorflow | tensorflow/python/keras/mixed_precision/experimental/policy.py | 1 | 25763 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Policy class for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.mixed_precision.experimental import device_compatibility_check
from tensorflow.python.keras.mixed_precision.experimental import loss_scale as keras_loss_scale_module
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util.tf_export import keras_export
# Default value of certain arguments, indicating the default behavior for
# that argument should be used.
USE_DEFAULT = 'USE_DEFAULT'
@keras_export('keras.mixed_precision.experimental.Policy')
class Policy(object):
"""A dtype policy for a Keras layer.
A dtype policy determines dtype-related aspects of a layer, such as its
computation and variable dtypes. Each layer has a policy. Policies can be
passed to the `dtype` argument of layer constructors, or a global policy can
be set with `tf.keras.mixed_precision.experimental.set_policy`. A layer will
  default to the global policy if no policy is passed to its constructor.
For many models, each layer's policy will have the same compute dtype and
variable dtype, which will typically be float32. In this case, we refer to the
singular dtype as the layer's dtype, which can be queried by the property
`tf.keras.layers.Layer.dtype`.
When mixed precision training is used, most layers will instead have a float16
or bfloat16 compute dtype and a float32 variable dtype, and so the layer does
not have a single dtype. When the variable dtype does not match the compute
dtype, variables will be automatically casted to the compute dtype to avoid
type errors. In this case, `tf.keras.layers.Layer.dtype` refers to the
variable dtype, not the compute dtype. See [the mixed precision
guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more
information on how to use mixed precision.
Certain policies also have a `tf.mixed_precision.experimental.LossScale`
  instance, which is used by `tf.keras.Model`s to perform loss scaling. Loss
scaling is a technique used with mixed precision to avoid numerical underflow
in float16 gradients. Loss scaling is only done by Models in `Model.fit`,
`Model.train_on_batch`, and similar methods. Layers which are not Models
ignore the loss scale.
Policies are constructed by passing a string to the constructor, e.g.
`tf.keras.mixed_precision.experimental.Policy('float32')`. The string
determines the compute and variable dtypes. It can be one of the following:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype. No loss scaling is done by default.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. These policies are used for
mixed precision training. With 'mixed_float16', a dynamic loss scale is
used by default. 'mixed_bfloat16' does no loss scaling by default, as loss
scaling is unnecessary with bfloat16.
### How to use mixed precision in a Keras model
To use mixed precision in a Keras model, the `'mixed_float16'` or
`'mixed_bfloat16'` policy can be used.
`tf.keras.mixed_precision.experimental.set_policy` can be used to set the
default policy for layers if no policy is passed to them. For example:
>>> tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... # Dense layers use global policy of 'mixed_float16', which does
... # computations in float16 while keeping variables in float32.
... tf.keras.layers.Dense(10),
... tf.keras.layers.Dense(10),
... # Softmax should be done in float32 for numeric stability. We pass
... # dtype='float32' to use float32 instead of the global policy.
... tf.keras.layers.Activation('softmax', dtype='float32')
... ])
Alternatively, the policy can be passed to individual layers instead of
setting the global policy with `set_policy`:
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... tf.keras.layers.Dense(10, dtype=policy),
... tf.keras.layers.Dense(10, dtype=policy),
... # Softmax should be done in float32 for numeric stability.
... tf.keras.layers.Activation('softmax', dtype='float32')
... ])
Note the `'mixed_float16'` policy will apply loss scaling by default in
`Model.fit`, `Model.train_on_batch`, and other training methods. If no such
method is used (e.g., a custom training loop is used) and `'mixed_float16'` is
used, the loss scale must be manually applied. See
`tf.keras.mixed_precision.experimental.LossScaleOptimizer` for details. For
`'mixed_bfloat16'`, no loss scaling is done and loss scaling never needs to be
manually applied.
See [the mixed precision
guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more
  information on using mixed precision.
### How to use float64 in a Keras model
Using float64 is similar to mixed precision. Either the global policy can be
set to float64, or `dtype='float64'` can be passed to individual layers. For
example, to set the global policy:
>>> tf.keras.mixed_precision.experimental.set_policy('float64')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... # All layers use global policy of 'float64', which does computations
... # and creates variables in float64.
... tf.keras.layers.Dense(10),
... tf.keras.layers.Dense(10),
... tf.keras.layers.Activation('softmax')
... ])
  >>> # Optionally set policy back to float32 if any other models use float32
>>> tf.keras.mixed_precision.experimental.set_policy('float32')
### How a layer uses its policy's compute dtype
A layer will cast its inputs to its compute dtype in TensorFlow 2. For
example:
>>> x = tf.ones((4, 4, 4, 4), dtype='float64')
>>> # `layer`'s policy defaults to float32.
>>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
  >>> # `layer` casts its inputs to its compute dtype, which is float32, and
>>> # does computations in float32.
>>> y = layer(x)
>>> y.dtype
tf.float32
Note that the base `tf.keras.layers.Layer` class inserts the casts. If
subclassing your own layer, you do not have to insert any casts.
Currently, only tensors in the first argument to the layer's `call` method are
casted. For example:
>>> class MyLayer(tf.keras.layers.Layer):
... # Bug! `b` will not be casted.
... def call(self, a, b):
... return a + 1., b + 1.
>>> a = tf.constant(1., dtype="float32")
>>> b = tf.constant(1., dtype="float32")
>>> layer = MyLayer(dtype="float64")
>>> x, y = layer(a, b)
>>> x.dtype
tf.float64
>>> y.dtype
tf.float32
If writing your own layer, it is recommended to accept tensors only in the
first argument. This way, all tensors are casted to the layer's compute dtype.
`MyLayer` should therefore be written as:
>>> class MyLayer(tf.keras.layers.Layer):
... # Now, all tensor inputs will be casted.
... def call(self, inputs):
... a, b = inputs
... return a + 1., b + 1.
>>> a = tf.constant(1., dtype="float32")
>>> b = tf.constant(1., dtype="float32")
>>> layer = MyLayer(dtype="float64")
>>> x, y = layer((a, b))
>>> x.dtype
tf.float64
>>> y.dtype
tf.float64
Other arguments are not automatically casted for technical reasons, but this
may change in a future minor release.
A layer subclass can prevent its inputs from being autocasted by passing
`autocast=False` to the layer constructor. For example:
>>> class NonAutoCastingLayer(tf.keras.layers.Layer):
... def __init__(self, **kwargs):
... kwargs['autocast'] = False
... super(NonAutoCastingLayer, self).__init__(**kwargs)
... def call(self, inp):
... return inp
>>> x = tf.ones((4, 4, 4, 4), dtype='float32')
>>> layer = NonAutoCastingLayer(dtype='float64')
  >>> y = layer(x) # Will not cast inputs to its compute dtype of float64
>>> y.dtype
tf.float32
### How a layer uses its policy's variable dtype
The default dtype of variables created by `tf.keras.layers.Layer.add_weight`
is the layer's policy's variable dtype.
If a layer's compute and variable dtypes differ, `add_weight` will wrap
floating-point variables with a special wrapper called an `AutoCastVariable`.
This wrapper is identical to the original variable except it casts itself to
the layer's compute dtype when used within `Layer.call`. Outside `Layer.call`,
the variable is not casted.
A layer author can prevent a variable from being wrapped with an
`AutoCastVariable` by passing `experimental_autocast=False` to `add_weight`:
>>> class MyLayer(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.x = self.add_weight('x')
... self.y = self.add_weight('y', experimental_autocast=False)
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> layer = MyLayer(dtype=policy)
>>> layer.build((2, 2))
>>> layer.x
<AutoCastVariable 'x:0' shape=() dtype=float32 true_dtype=float32, numpy=...>
>>> layer.y
<tf.Variable 'y:0' shape=() dtype=float32, numpy=...>
Passing `experimental_autocast=False` is useful for layers which may
internally do some math in the variable dtype instead of the compute dtype.
For example, you may wish to compute variable statistics, such as mean and
variance, in the variable dtype.
### How to write a layer that supports mixed precision and float64.
For the most part, layers will automatically support mixed precision and
float64 without any additional work, due to the fact the base layer
automatically casts inputs, creates variables of the correct type, and in the
case of mixed precision, wraps variables with `AutoCastVariables`.
For example, this simple dense layer does not require any additional work to
support mixed precision or float64. Keras automatically casts the inputs and
variable to the appropriate dtype.
>>> class MyDense(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
... def call(self, inputs):
... return tf.matmul(inputs, self.kernel)
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> layer = MyDense(dtype=policy)
>>> x = np.random.rand(10, 10)
>>> y = layer(x)
>>> y.dtype
tf.float16
The primary case where you need extra work to support mixed precision or
float64 is when you create a new tensor, such as with `tf.ones` or
`tf.constant`. In such cases, you must create the tensor of the correct dtype.
For example, suppose you modify the `MyDense` layer to add a random number to
the output using `tf.random.normal`. You must pass the input dtype to
`tf.random.normal` to ensure the dtypes match.
>>> class MyDense(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
... def call(self, inputs):
... rand = tf.random.normal(shape=inputs.shape, dtype=inputs.dtype)
... return tf.matmul(inputs, self.kernel) + rand
>>>
>>> layer = MyDense(dtype=policy)
>>> y = layer(x)
>>> y.dtype
tf.float16
If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a `TypeError`
would have occurred. This is because the dtype defaults to `"float32"`, so the
layer would only work if the inputs were float32.
### The deprecated "infer" policy
In addition to the above mentioned policies, a policy can also be "infer".
This Policy is deprecated, and it is not recommended. When a layer has an
infer policy, it will infer the computation and variable dtype from the first
input the first time the layer is called. Once the layer is called for the
first time, the layer's policy will change to the dtype of the first input.
In TensorFlow 1, only the "infer" policy is available.
"""
def __init__(self, name, loss_scale=USE_DEFAULT):
"""Constructs the policy.
The `name` argument determines the compute and variable dtype, the default
loss scale, and has no additional effect on the Policy. The compute and
variable dtypes can only be specified through `name`, and cannot be
specified directly.
Args:
name: A string. Can be one of the following values:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. With 'mixed_float16',
a dynamic loss scale is used. These policies are used for mixed
precision training.
* 'infer' (deprecated): Infer the compute and variable dtype from the
input dtype.
loss_scale: A `tf.mixed_precision.experimental.LossScale`, an int (which
uses a `FixedLossScale`), or the string "dynamic" (which uses a
`DynamicLossScale`). Defaults to using no loss scaling unless `name` is
"mixed_float16", in which case this defaults to "dynamic". Only
`tf.keras.Model`s, not layers, use the loss scale, and it is only used
during `Model.fit`, `Model.train_on_batch`, and other similar methods.
"""
if isinstance(name, dtypes.DType):
raise TypeError("'name' must be a string, not a DType. "
"Instead, pass DType.name. Got: %s" % (name.name,))
elif not isinstance(name, six.string_types):
raise TypeError("'name' must be a string, but got: %s" % (name,))
self._name = name
self._compute_dtype, self._variable_dtype = self._parse_name(name)
if loss_scale == USE_DEFAULT:
loss_scale = 'dynamic' if name == 'mixed_float16' else None
self._using_default_loss_scale = True
else:
self._using_default_loss_scale = False
if loss_scale and self._compute_dtype not in (None, 'float16'):
tf_logging.warn('Creating a Policy with a loss scale is only useful for '
'float16 policies. You passed loss_scale=%r for policy '
'%s. Consider not passing any loss_scale instead.' %
(loss_scale, name))
self._loss_scale = keras_loss_scale_module.get(loss_scale)
    if name in ('mixed_float16', 'mixed_bfloat16'):
device_compatibility_check.log_device_compatibility_check(name)
def _parse_name(self, name):
"""Parses a Policy name into a compute and variable dtype.
Args:
name: The name of the policy:
Returns:
The (compute_dtype, variable_dtype) pair.
"""
if name.endswith('_float32_vars'):
error_msg = ('Policies ending in \'_float32_vars\' have been removed '
'from TensorFlow.')
if name in ('infer_float32_vars', 'infer_with_float32_vars'):
error_msg += (' Please use the \'mixed_float16\' or \'mixed_bfloat16\' '
'policy instead.')
elif name == 'float16_with_float32_vars':
error_msg += (' Please use the \'mixed_float16\' policy instead.')
elif name == 'bfloat16_with_float32_vars':
error_msg += (' Please use the \'mixed_bfloat16\' policy instead.')
error_msg += ' Got policy name: \'%s\'' % name
raise ValueError(error_msg)
if name == 'mixed_float16':
return 'float16', 'float32'
elif name == 'mixed_bfloat16':
return 'bfloat16', 'float32'
elif name == 'infer':
return None, None
try:
dtype = dtypes.as_dtype(name).name
except TypeError:
error = ("Cannot convert value %s to a mixed precision Policy. "
"Valid policies include include 'mixed_float16', "
"'mixed_bfloat16', and the name of any dtype such as "
"'float32'." % (name,))
# six.raise_from suppresses the original TypeError from being raised
six.raise_from(ValueError(error), None)
return dtype, dtype
@property
def variable_dtype(self):
"""The variable dtype of this policy.
This is the dtype layers will create their variables in, unless a layer
explicitly chooses a different dtype. If this is different than
`Policy.compute_dtype`, Layers will cast variables to the compute dtype to
avoid type errors.
Returns:
The variable dtype of this policy, or None if the variable dtype should be
inferred from the inputs.
"""
return self._variable_dtype
@property
def compute_dtype(self):
"""The compute dtype of this policy.
This is the dtype layers will do their computations in.
Note that even if the compute dtype is float16 or bfloat16, hardware devices
may not do individual adds, multiplies, and other fundamental operations in
[b]float16, but instead may do some of them in float32 for numeric
stability. The compute dtype is the dtype of the inputs and outputs of the
TensorFlow ops that the layer executes. Internally, many TensorFlow ops will
do certain internal calculations in float32, or some other device-internal
intermediate format with higher precision than [b]float16, to increase
numeric stability.
For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a
float16 compute dtype, will pass float16 inputs to tf.matmul. But, tf.matmul
  will use float32 intermediate math. The performance benefit of float16 is
still apparent, due to increased memory bandwidth and the fact modern GPUs
have specialized hardware for computing matmuls on float16 while still
keeping intermediate computations in float32.
Returns:
The compute dtype of this policy, or None if the compute dtype should be
inferred from the inputs.
"""
return self._compute_dtype
@property
def should_cast_variables(self):
"""Returns True if variables should be casted.
This is true if the variable dtype is not the same as the compute dtype.
Returns:
True, if variables should be casted.
"""
return self.variable_dtype != self.compute_dtype
@property
def loss_scale(self):
"""Returns the loss scale of this Policy.
Returns:
A `tf.mixed_precision.experimental.LossScale`, or None.
"""
return self._loss_scale
@property
def name(self):
"""Returns the name of this policy."""
return self._name
def __repr__(self):
return '<Policy "%s", loss_scale=%s>' % (self._name, self.loss_scale)
def get_config(self):
config = {
'name': self.name
}
if not self._using_default_loss_scale:
# We only include the loss scale if the default loss scale is not used.
# This allows us to change the loss scale config format without breaking
# users who use the default loss scale.
config['loss_scale'] = keras_loss_scale_module.serialize(self.loss_scale)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if 'loss_scale' in config and isinstance(config['loss_scale'], dict):
config = config.copy()
config['loss_scale'] = keras_loss_scale_module.deserialize(
config['loss_scale'], custom_objects=custom_objects)
return cls(**config)
# The current global policy in effect. If None, it means the current value of
# floatx should be used as the policy if the V2 dtype behavior is enabled,
# or "infer" otherwise.
# TODO(reedwm): Make this thread local?
_global_policy = None
@keras_export('keras.mixed_precision.experimental.global_policy')
def global_policy():
"""Returns the global Policy.
The global policy is the default policy used for layers, if no policy is
passed to the layer constructor. If no policy has been set with
`keras.mixed_precision.experimental.set_policy`, this will return a policy
constructed from `tf.keras.backend.floatx()` in TensorFlow 2 (floatx defaults
to float32), or an "infer" policy in TensorFlow 1.
See `keras.mixed_precision.experimental.Policy` for more information.
Returns:
The global Policy.
"""
if _global_policy is None:
if base_layer_utils.v2_dtype_behavior_enabled():
return Policy(backend.floatx())
else:
return Policy('infer')
return _global_policy
def policy_defaults_to_floatx():
"""Returns True if `global_policy()` will use the current value of floatx."""
return _global_policy is None and base_layer_utils.v2_dtype_behavior_enabled()
def _check_if_mixed_precision_graph_rewrite_is_enabled():
# TODO(reedwm): Update this comment once the Keras API is complete.
if mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled:
raise ValueError(
'The mixed precision policy cannot be set, because the mixed '
'precision graph rewrite has already been enabled.\n'
'At most, one of the following functions can be called:\n\n'
' 1. tf.train.experimental.enable_mixed_precision_graph_rewrite() '
'(You called this first)\n'
' 2. tf.keras.mixed_precision.experimental.set_policy() (You called '
'this second)\n\n'
'You called both functions, which is an error, because both functions '
'enable you to use mixed precision. If in doubt which function to use, '
'use the second, as it supports Eager execution and is more '
'customizable.')
@keras_export('keras.mixed_precision.experimental.set_policy')
def set_policy(policy):
"""Sets the global Policy.
The global policy is the default policy used for layers, if no policy is
passed to the layer constructor. If no global policy is set, layers will
instead default to a Policy constructed from `tf.keras.backend.floatx()` in
TensorFlow 2. In TensorFlow 1, layers default to an "infer" policy.
See `keras.mixed_precision.experimental.Policy` for more information.
Args:
    policy: A Policy, or a string that will be converted to a Policy.
"""
global _global_policy
_check_if_mixed_precision_graph_rewrite_is_enabled()
if policy is not None and not isinstance(policy, Policy):
policy = Policy(policy)
if (policy and not base_layer_utils.v2_dtype_behavior_enabled() and
policy.compute_dtype):
raise ValueError(
'The global policy can only be set to a non-infer policy in TensorFlow '
'2')
_global_policy = policy
mixed_precision_global_state.using_default_mixed_precision_policy = (
_global_policy is None)
# TODO(reedwm): Make this thread local
@contextlib.contextmanager
def policy_scope(policy):
"""A context manager that sets the global Policy under it.
Args:
    policy: A Policy, or a string that will be converted to a Policy.
Yields:
Nothing.
"""
old_policy = _global_policy
try:
set_policy(policy)
yield
finally:
set_policy(old_policy)
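# Example use of the context manager above (a sketch):
#
#   with policy_scope('mixed_float16'):
#     layer = tf.keras.layers.Dense(10)  # created under the scoped policy
#   # the previous global policy is restored on exit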
def _is_convertible_to_dtype(dtype):
try:
dtypes.as_dtype(dtype)
return True
except TypeError:
return False
def _policy_equivalent_to_dtype(policy):
"""Returns True if the Policy is equivalent to a single dtype.
A policy is equivalent to a single dtype if the policy's compute and variable
dtypes are the same and the policy does not cause the layer/model to have
additional behavior, such as loss scaling.
The "infer" policy is considered equivalent to a single dtype.
Args:
policy: A Policy.
Returns:
True, if the policy is equivalent to a single dtype.
"""
  # We use type() instead of isinstance because a subclass of Policy is never
# equivalent to a dtype.
return (type(policy) == Policy and # pylint: disable=unidiomatic-typecheck
list(policy.get_config().keys()) == ['name'] and
(policy.name == 'infer' or _is_convertible_to_dtype(policy.name)))
def serialize(policy):
if _policy_equivalent_to_dtype(policy):
# We return either None or the policy name for compatibility with older
# versions of Keras. If the policy name is returned, it is a dtype string
# such as 'float32'.
return None if policy.name == 'infer' else policy.name
return generic_utils.serialize_keras_object(policy)
def deserialize(config, custom_objects=None):
if isinstance(config, str) and _is_convertible_to_dtype(config):
return Policy(config)
if config is None:
return Policy('infer')
module_objects = {'Policy': Policy}
return generic_utils.deserialize_keras_object(
config,
module_objects=module_objects,
custom_objects=custom_objects,
printable_module_name='dtype policy')
| apache-2.0 |
Ted1993/Flasky | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/__init__.py | 1778 | 1295 | ######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "2.3.0"
from sys import version_info
def detect(aBuf):
if ((version_info < (3, 0) and isinstance(aBuf, unicode)) or
(version_info >= (3, 0) and not isinstance(aBuf, bytes))):
raise ValueError('Expected a bytes object, not a unicode object')
from . import universaldetector
u = universaldetector.UniversalDetector()
u.reset()
u.feed(aBuf)
u.close()
return u.result
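# Illustrative usage (a sketch; the encoding/confidence shown are assumptions,
# actual values depend on the bytes passed in):
#
#   with open('unknown.txt', 'rb') as f:
#       result = detect(f.read())
#   # result is a dict such as {'encoding': 'utf-8', 'confidence': 0.99}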
| mit |
PrismTech/opensplice | build/docs/DDSTutorial/source/conf.py | 2 | 8804 | # -*- coding: utf-8 -*-
#
# Vortex OpenSplice Tutorial build configuration file, created by
# ReST Editor on 24-Mar-2015
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import time
# import liteconfig
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.todo']
#extensions = ['sphinx.ext.todo', 'numfig']
extensions = ['sphinx.ext.todo', 'sphinx.ext.ifconfig']
def setup(app):
app.add_config_value('rmi_languages', '', True)
#rmi_languages = 'C++ and Java'
rmi_languages = 'C++'
#rmi_languages = 'Java'
rst_prolog = """
.. |rmi_langs| replace:: C++
.. |product_name| replace:: OpenSplice
"""
#.. |rmi_langs| replace:: C++ and Java
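# The 'rmi_languages' value registered in setup() above can be tested from the
# .rst sources with sphinx.ext.ifconfig, e.g. (an illustrative sketch):
#
#   .. ifconfig:: rmi_languages == 'C++'
#
#      This paragraph only appears when the tutorial is built for C++.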
# Add any paths that contain templates here, relative to this directory.
templates_path = [u'_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = u'utf-8-sig'
# The master toctree document.
master_doc = u'index'
# General information about the project.
project = u'The Data Distribution Service Tutorial'
this_year = time.strftime( '%Y' )
copyright = u'{y}, ADLINK Technology Limited'.format( y = this_year )
print 'Copyright string is:', copyright
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = u's'
#version = liteconfig.version
#version = u'6.x'
# The full version, including alpha/beta/rc tags.
#release = u's'
#release = version
#release = u'.0'
#print 'Short version string is:', version
#print 'Full version string is:', release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = u'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# Force blank date with today = ' ' (space, not empty string)
today = ' '
# ***************
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = u'sphinxdoc'
html_theme = u'vortextheme'
html_theme_path = ['../../.']
#build theme directory in lite using environment variable, so shared amongst books
# insight team can delete,
#html_theme_path = [os.environ['VL_HOME'] + '/build/docs']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
html_title = 'The Data Distribution Service Tutorial'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
html_short_title = 'DDS Tutorial'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
html_logo = './images/Vortex_logo_2014.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
html_static_path = [u'_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'The Data Distribution Service Tutorial'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = u'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = u'10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', 'OpenSplice_DDSTutorial.tex', u'The DDS Tutorial', u'', 'manual', True)]
# Note 'author' field empty
# Added 'True' to end of generated line to suppress 'Index & Tables'
# A dictionary that contains LaTeX snippets that override those Sphinx usually
# puts into the generated .tex files.
latex_elements = { 'babel': '\\usepackage[english]{babel}' }  # superseded by the fuller latex_elements dict below
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
latex_logo = 'images/Vortex-Cover.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# THIS GETS RID OF BLANK PAGES AT ENDS OF CHAPTERS & ToC
latex_elements = {
'classoptions': ',openany, oneside',
'babel': '\\usepackage[english]{babel}'
}
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', 'DDS_Tutorial', u'DDS_Tutorial Documentation', [u'ADLINK Technology Limited'], 1)]
# -- Additional options --------------------------------------------------------
todo_include_todos = True
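# A typical build invocation for this configuration might look like the
# following (illustrative only; the output directory names are assumptions,
# not something defined by this file):
#
#   sphinx-build -b html . _build/html
#   sphinx-build -b latex . _build/latex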
| gpl-3.0 |
googleapis/googleapis-gen | google/cloud/networkmanagement/v1/networkmanagement-v1-py/google/cloud/network_management_v1/services/reachability_service/transports/grpc.py | 1 | 21150 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.network_management_v1.types import connectivity_test
from google.cloud.network_management_v1.types import reachability
from google.longrunning import operations_pb2 # type: ignore
from .base import ReachabilityServiceTransport, DEFAULT_CLIENT_INFO
class ReachabilityServiceGrpcTransport(ReachabilityServiceTransport):
"""gRPC backend transport for ReachabilityService.
The Reachability service in the Google Cloud Network
Management API provides services that analyze the reachability
within a single Google Virtual Private Cloud (VPC) network,
between peered VPC networks, between VPC and on-premises
networks, or between VPC networks and internet hosts. A
reachability analysis is based on Google Cloud network
configurations.
You can use the analysis results to verify these configurations
and to troubleshoot connectivity issues.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'networkmanagement.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'networkmanagement.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_connectivity_tests(self) -> Callable[
[reachability.ListConnectivityTestsRequest],
reachability.ListConnectivityTestsResponse]:
r"""Return a callable for the list connectivity tests method over gRPC.
Lists all Connectivity Tests owned by a project.
Returns:
Callable[[~.ListConnectivityTestsRequest],
~.ListConnectivityTestsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_connectivity_tests' not in self._stubs:
self._stubs['list_connectivity_tests'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/ListConnectivityTests',
request_serializer=reachability.ListConnectivityTestsRequest.serialize,
response_deserializer=reachability.ListConnectivityTestsResponse.deserialize,
)
return self._stubs['list_connectivity_tests']
@property
def get_connectivity_test(self) -> Callable[
[reachability.GetConnectivityTestRequest],
connectivity_test.ConnectivityTest]:
r"""Return a callable for the get connectivity test method over gRPC.
Gets the details of a specific Connectivity Test.
Returns:
Callable[[~.GetConnectivityTestRequest],
~.ConnectivityTest]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_connectivity_test' not in self._stubs:
self._stubs['get_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/GetConnectivityTest',
request_serializer=reachability.GetConnectivityTestRequest.serialize,
response_deserializer=connectivity_test.ConnectivityTest.deserialize,
)
return self._stubs['get_connectivity_test']
@property
def create_connectivity_test(self) -> Callable[
[reachability.CreateConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the create connectivity test method over gRPC.
Creates a new Connectivity Test. After you create a test, the
reachability analysis is performed as part of the long running
operation, which completes when the analysis completes.
If the endpoint specifications in ``ConnectivityTest`` are
invalid (for example, containing non-existent resources in the
network, or you don't have read permissions to the network
configurations of listed projects), then the reachability result
returns a value of ``UNKNOWN``.
If the endpoint specifications in ``ConnectivityTest`` are
incomplete, the reachability result returns a value of
AMBIGUOUS. For more information, see the Connectivity Test
documentation.
Returns:
Callable[[~.CreateConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_connectivity_test' not in self._stubs:
self._stubs['create_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/CreateConnectivityTest',
request_serializer=reachability.CreateConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['create_connectivity_test']
@property
def update_connectivity_test(self) -> Callable[
[reachability.UpdateConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the update connectivity test method over gRPC.
Updates the configuration of an existing ``ConnectivityTest``.
After you update a test, the reachability analysis is performed
as part of the long running operation, which completes when the
analysis completes. The Reachability state in the test resource
is updated with the new result.
If the endpoint specifications in ``ConnectivityTest`` are
invalid (for example, they contain non-existent resources in the
network, or the user does not have read permissions to the
network configurations of listed projects), then the
reachability result returns a value of UNKNOWN.
If the endpoint specifications in ``ConnectivityTest`` are
incomplete, the reachability result returns a value of
        ``AMBIGUOUS``. See the documentation in ``ConnectivityTest`` for
        more details.
Returns:
Callable[[~.UpdateConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_connectivity_test' not in self._stubs:
self._stubs['update_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/UpdateConnectivityTest',
request_serializer=reachability.UpdateConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['update_connectivity_test']
@property
def rerun_connectivity_test(self) -> Callable[
[reachability.RerunConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the rerun connectivity test method over gRPC.
Rerun an existing ``ConnectivityTest``. After the user triggers
the rerun, the reachability analysis is performed as part of the
long running operation, which completes when the analysis
completes.
Even though the test configuration remains the same, the
reachability result may change due to underlying network
configuration changes.
If the endpoint specifications in ``ConnectivityTest`` become
invalid (for example, specified resources are deleted in the
network, or you lost read permissions to the network
configurations of listed projects), then the reachability result
returns a value of ``UNKNOWN``.
Returns:
Callable[[~.RerunConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'rerun_connectivity_test' not in self._stubs:
self._stubs['rerun_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/RerunConnectivityTest',
request_serializer=reachability.RerunConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['rerun_connectivity_test']
@property
def delete_connectivity_test(self) -> Callable[
[reachability.DeleteConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the delete connectivity test method over gRPC.
Deletes a specific ``ConnectivityTest``.
Returns:
Callable[[~.DeleteConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_connectivity_test' not in self._stubs:
self._stubs['delete_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/DeleteConnectivityTest',
request_serializer=reachability.DeleteConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_connectivity_test']
__all__ = (
'ReachabilityServiceGrpcTransport',
)
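# A minimal construction sketch (illustrative only): in normal use this
# transport is created for you by ReachabilityServiceClient, and the
# credential handling below is an assumption about the caller's environment.
#
#   import google.auth
#   credentials, _ = google.auth.default()
#   transport = ReachabilityServiceGrpcTransport(credentials=credentials)
#   list_rpc = transport.list_connectivity_tests  # callable taking a ListConnectivityTestsRequest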
| apache-2.0 |
VlachosGroup/VlachosGroupAdditivity | pgradd/DrawMol.py | 1 | 2230 | """
===============================================================
Definition to draw an RDKit mol object (:mod:`pgradd.DrawMol`)
===============================================================
Converts an RDKit mol object to an SVG image and displays it.
"""
from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
from IPython.display import SVG, display
# http://rdkit.blogspot.com/2015/02/new-drawing-code.html
def moltosvg(mol, highlight=[], molSize=(400, 400), kekulize=True):
mc = Chem.Mol(mol.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except Exception:
mc = Chem.Mol(mol.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0], molSize[1])
# Atom Label
opts = drawer.drawOptions()
# Atom name and index
for i in range(mol.GetNumAtoms()):
opts.atomLabels[i] = mol.GetAtomWithIdx(i).GetSymbol()+str(i)
# radicals and charges
for atom in mol.GetAtoms():
nr = atom.GetNumRadicalElectrons()
nc = atom.GetFormalCharge()
if nr > 0:
string = atom.GetSymbol() + ':'*divmod(nr, 2)[0] +\
'.'*divmod(nr, 2)[1]
opts.atomLabels[atom.GetIdx()] += string
elif nc == 1:
string = atom.GetSymbol() + '+'
opts.atomLabels[atom.GetIdx()] += string
elif nc > 1:
string = atom.GetSymbol() + '+' + str(nc)
opts.atomLabels[atom.GetIdx()] += string
elif nc == -1:
string = atom.GetSymbol() + '-'
opts.atomLabels[atom.GetIdx()] += string
elif nc < -1:
string = atom.GetSymbol() + '-' + str(nc)
opts.atomLabels[atom.GetIdx()] += string
# highlight
if highlight:
drawer.DrawMolecule(mc, highlightAtoms=highlight)
else:
drawer.DrawMolecule(mc)
drawer.FinishDrawing()
svg = drawer.GetDrawingText()
# It seems that the svg renderer used doesn't quite hit the spec.
# Here are some fixes to make it work in the notebook, although I think
# the underlying issue needs to be resolved at the generation step
    svg = svg.replace('svg:', '')
display(SVG(svg))
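# A minimal usage sketch (illustrative only): it assumes RDKit is installed and
# that the call happens in a Jupyter/IPython session, since moltosvg() renders
# through IPython.display; the SMILES string is a hypothetical example input.
if __name__ == '__main__':
    example_mol = Chem.MolFromSmiles('c1ccccc1O')  # phenol, as a small test molecule
    moltosvg(example_mol, highlight=[0, 1], molSize=(300, 300))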
| mit |
aldebaran/qibuild | python/qitest/parsers.py | 1 | 7334 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Collection of parser functions for qitest actions """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import qisys.parsers
import qitest.project
import qibuild.parsers
class EmptyTestListException(Exception):
""" No test to run exception """
pass
def test_parser(parser, with_num_jobs=True):
""" Test Parser """
qisys.parsers.worktree_parser(parser)
group = parser.add_argument_group("test options")
group.add_argument("--perf", dest="perf", action="store_true",
help="run perfs tests instead of pure tests.")
group.add_argument("-k", "--pattern", dest="patterns", action="append",
help="Filter tests matching these patterns")
group.add_argument("-x", "--exclude", dest="excludes", action="append",
help="Exclude test matching these patterns")
group.add_argument("-V", dest="verbose_tests", action="store_true",
help="display tests output")
group.add_argument("--valgrind", dest="valgrind", action="store_true",
help="run tests under valgrind")
group.add_argument("--nightmare", dest="nightmare", action="store_true",
help="run tests in shuffle and 20 times (apply only to gtest)")
group.add_argument("--coverage", dest="coverage", action="store_true",
help="run coverage")
group.add_argument("--ncpu", dest="num_cpus", default=-1, type=int,
help="set number of CPU each test is allowed to use (linux)")
group.add_argument("--nightly", action="store_true", dest="nightly")
group.add_argument("--break-on-failure", action="store_true", dest="break_on_failure",
help="Break on failure (for gtest only)")
group.add_argument("--repeat-until-fail", default=0, type=int, metavar="N",
help="Repeat tests until they fail (at most N times)")
group.add_argument("--qitest-json", dest="qitest_jsons", action="append")
group.add_argument("--test-output-dir", type=os.path.abspath,
dest="test_output_dir",
help="Generate XML test reports in the given directory "
"(instead of build-<platform>/sdk/test-results)")
group.add_argument("--coverage-output-dir", dest="coverage_output_dir",
help="Generate XML and HTML coverage reports in the given "
"directory (instead of build-<platform>/sdk/coverage-results)")
group.add_argument("--root-output-dir", dest="test_output_dir", metavar="ROOT_OUTPUT_DIR",
help="same as --test-output-dir (deprecated)")
group.add_argument("--no-capture", dest="capture", action="store_false")
group.add_argument("--ignore-timeouts", dest="ignore_timeouts", action="store_true",
help="Ignore timeouts when running tests")
group.add_argument("--lf", "--last-failed", dest="last_failed", action="store_true",
help="Run the failing test from previous run")
group.add_argument("--allow-no-test", dest="allow_no_test", action="store_true",
help="Don't fail if no tests to run")
parser.set_defaults(nightly=False, capture=True, last_failed=False,
ignore_timeouts=False)
if with_num_jobs:
qisys.parsers.parallel_parser(group, default=1)
return group
def get_test_runner(args, build_project=None, qitest_json=None):
""" Get Test Runner """
test_project = None
if not qitest_json:
qitest_json = vars(args).get("qitest_json")
if not qitest_json:
candidate = os.path.join(os.getcwd(), "qitest.json")
if os.path.exists(candidate):
qitest_json = candidate
if qitest_json:
test_project = qitest.project.TestProject(qitest_json)
if not test_project:
if build_project:
test_project = build_project.to_test_project()
else:
return None
test_runner = qibuild.test_runner.ProjectTestRunner(test_project)
if build_project:
test_runner.cwd = build_project.sdk_directory
test_runner.env = build_project.build_worktree.get_env()
else:
test_runner.cwd = qisys.sh.to_native_path(os.path.dirname(qitest_json))
test_runner.patterns = args.patterns
test_runner.excludes = args.excludes
test_runner.perf = args.perf
test_runner.coverage = args.coverage
test_runner.break_on_failure = args.break_on_failure
test_runner.valgrind = args.valgrind
test_runner.verbose = args.verbose_tests
test_runner.num_cpus = args.num_cpus
test_runner.num_jobs = args.num_jobs
test_runner.repeat_until_fail = args.repeat_until_fail
test_runner.nightly = args.nightly
test_runner.nightmare = args.nightmare
test_runner.test_output_dir = args.test_output_dir
test_runner.capture = args.capture
test_runner.last_failed = args.last_failed
test_runner.ignore_timeouts = args.ignore_timeouts
return test_runner
def parse_build_projects(args):
""" Parse Build Projects """
res = list()
try:
build_worktree = qibuild.parsers.get_build_worktree(args)
solve_deps = False
if args.use_deps:
solve_deps = True
build_projects = qibuild.parsers.get_build_projects(
build_worktree,
args, solve_deps=solve_deps)
for build_project in build_projects:
test_runner = None
try:
test_runner = get_test_runner(args, build_project=build_project)
except qibuild.project.NoQiTestJson:
pass
if test_runner:
res.append(test_runner)
except (qisys.worktree.NotInWorkTree, qibuild.parsers.CouldNotGuessProjectName):
pass
return res
def get_test_runners(args):
""" Get Test Runners """
res = list()
qitest_jsons = args.qitest_jsons or list()
# first case: qitest.json in current working directory
test_runner = get_test_runner(args)
if test_runner:
res.append(test_runner)
# second case: qitest.json specified with --qitest-json
for qitest_json in qitest_jsons:
test_runner = get_test_runner(args, qitest_json=qitest_json)
res.append(test_runner)
# third case: parsing build projects
build_projects_runners = parse_build_projects(args)
# avoid appending a test_runner guessed from a build project
# when res already contains a test runner computed from a
# --qitest-json argument
known_cwds = [x.cwd for x in res]
for test_runner in build_projects_runners:
if test_runner.cwd not in known_cwds:
res.append(test_runner)
if args.coverage and not build_projects_runners:
raise Exception("""--coverage can only be used from a qibuild CMake project\n""")
elif args.coverage:
return build_projects_runners
if not res:
raise EmptyTestListException("Nothing found to test")
return res
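# A rough usage sketch (illustrative only): the real qibuild/qitest actions
# wire these helpers into their own argparse setup, and the argument values
# below are assumptions, not taken from this module.
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   test_parser(parser)
#   args = parser.parse_args(["-k", "test_foo", "--qitest-json", "qitest.json"])
#   runners = get_test_runners(args)  # raises EmptyTestListException if nothing is found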
| bsd-3-clause |
pekeler/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_pep352.py | 51 | 9655 | import unittest
import __builtin__
import exceptions
import warnings
from test.test_support import run_unittest
import os
from platform import system as platform_system
def ignore_message_warning():
"""Ignore the DeprecationWarning for BaseException.message."""
warnings.resetwarnings()
warnings.filterwarnings("ignore", "BaseException.message",
DeprecationWarning)
class ExceptionClassTests(unittest.TestCase):
"""Tests for anything relating to exception objects themselves (e.g.,
inheritance hierarchy)"""
def test_builtins_new_style(self):
self.failUnless(issubclass(Exception, object))
def verify_instance_interface(self, ins):
with warnings.catch_warnings():
ignore_message_warning()
for attr in ("args", "message", "__str__", "__repr__",
"__getitem__"):
self.failUnless(hasattr(ins, attr),
"%s missing %s attribute" %
(ins.__class__.__name__, attr))
def test_inheritance(self):
# Make sure the inheritance hierarchy matches the documentation
exc_set = set(x for x in dir(exceptions) if not x.startswith('_'))
inheritance_tree = open(os.path.join(os.path.split(__file__)[0],
'exception_hierarchy.txt'))
try:
superclass_name = inheritance_tree.readline().rstrip()
try:
last_exc = getattr(__builtin__, superclass_name)
except AttributeError:
self.fail("base class %s not a built-in" % superclass_name)
self.failUnless(superclass_name in exc_set)
exc_set.discard(superclass_name)
superclasses = [] # Loop will insert base exception
last_depth = 0
for exc_line in inheritance_tree:
exc_line = exc_line.rstrip()
depth = exc_line.rindex('-')
exc_name = exc_line[depth+2:] # Slice past space
if '(' in exc_name:
paren_index = exc_name.index('(')
platform_name = exc_name[paren_index+1:-1]
exc_name = exc_name[:paren_index-1] # Slice off space
if platform_system() != platform_name:
exc_set.discard(exc_name)
continue
if '[' in exc_name:
left_bracket = exc_name.index('[')
exc_name = exc_name[:left_bracket-1] # cover space
try:
exc = getattr(__builtin__, exc_name)
except AttributeError:
self.fail("%s not a built-in exception" % exc_name)
if last_depth < depth:
superclasses.append((last_depth, last_exc))
elif last_depth > depth:
while superclasses[-1][0] >= depth:
superclasses.pop()
self.failUnless(issubclass(exc, superclasses[-1][1]),
"%s is not a subclass of %s" % (exc.__name__,
superclasses[-1][1].__name__))
try: # Some exceptions require arguments; just skip them
self.verify_instance_interface(exc())
except TypeError:
pass
self.failUnless(exc_name in exc_set)
exc_set.discard(exc_name)
last_exc = exc
last_depth = depth
finally:
inheritance_tree.close()
self.failUnlessEqual(len(exc_set), 0, "%s not accounted for" % exc_set)
interface_tests = ("length", "args", "message", "str", "unicode", "repr",
"indexing")
def interface_test_driver(self, results):
for test_name, (given, expected) in zip(self.interface_tests, results):
self.failUnlessEqual(given, expected, "%s: %s != %s" % (test_name,
given, expected))
def test_interface_single_arg(self):
# Make sure interface works properly when given a single argument
arg = "spam"
exc = Exception(arg)
with warnings.catch_warnings():
ignore_message_warning()
results = ([len(exc.args), 1], [exc.args[0], arg],
[exc.message, arg],
[str(exc), str(arg)], [unicode(exc), unicode(arg)],
[repr(exc), exc.__class__.__name__ + repr(exc.args)], [exc[0],
arg])
self.interface_test_driver(results)
def test_interface_multi_arg(self):
# Make sure interface correct when multiple arguments given
arg_count = 3
args = tuple(range(arg_count))
exc = Exception(*args)
with warnings.catch_warnings():
ignore_message_warning()
results = ([len(exc.args), arg_count], [exc.args, args],
[exc.message, ''], [str(exc), str(args)],
[unicode(exc), unicode(args)],
[repr(exc), exc.__class__.__name__ + repr(exc.args)],
[exc[-1], args[-1]])
self.interface_test_driver(results)
def test_interface_no_arg(self):
# Make sure that with no args that interface is correct
exc = Exception()
with warnings.catch_warnings():
ignore_message_warning()
results = ([len(exc.args), 0], [exc.args, tuple()],
[exc.message, ''],
[str(exc), ''], [unicode(exc), u''],
[repr(exc), exc.__class__.__name__ + '()'], [True, True])
self.interface_test_driver(results)
def test_message_deprecation(self):
# As of Python 2.6, BaseException.message is deprecated.
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.filterwarnings('error')
try:
BaseException().message
except DeprecationWarning:
pass
else:
self.fail("BaseException.message not deprecated")
exc = BaseException()
try:
exc.message = ''
except DeprecationWarning:
pass
else:
self.fail("BaseException.message assignment not deprecated")
class UsageTests(unittest.TestCase):
"""Test usage of exceptions"""
def raise_fails(self, object_):
"""Make sure that raising 'object_' triggers a TypeError."""
try:
raise object_
except TypeError:
return # What is expected.
self.fail("TypeError expected for raising %s" % type(object_))
def catch_fails(self, object_):
"""Catching 'object_' should raise a TypeError."""
try:
try:
raise StandardError
except object_:
pass
except TypeError:
pass
except StandardError:
self.fail("TypeError expected when catching %s" % type(object_))
try:
try:
raise StandardError
except (object_,):
pass
except TypeError:
return
except StandardError:
self.fail("TypeError expected when catching %s as specified in a "
"tuple" % type(object_))
def test_raise_classic(self):
# Raising a classic class is okay (for now).
class ClassicClass:
pass
try:
raise ClassicClass
except ClassicClass:
pass
except:
self.fail("unable to raise classic class")
try:
raise ClassicClass()
except ClassicClass:
pass
except:
self.fail("unable to raise class class instance")
def test_raise_new_style_non_exception(self):
# You cannot raise a new-style class that does not inherit from
# BaseException; the ability was not possible until BaseException's
# introduction so no need to support new-style objects that do not
# inherit from it.
class NewStyleClass(object):
pass
self.raise_fails(NewStyleClass)
self.raise_fails(NewStyleClass())
def test_raise_string(self):
# Raising a string raises TypeError.
self.raise_fails("spam")
def test_catch_string(self):
# Catching a string should trigger a DeprecationWarning.
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.filterwarnings("error")
str_exc = "spam"
try:
try:
raise StandardError
except str_exc:
pass
except DeprecationWarning:
pass
except StandardError:
self.fail("catching a string exception did not raise "
"DeprecationWarning")
# Make sure that even if the string exception is listed in a tuple
# that a warning is raised.
try:
try:
raise StandardError
except (AssertionError, str_exc):
pass
except DeprecationWarning:
pass
except StandardError:
self.fail("catching a string exception specified in a tuple did "
"not raise DeprecationWarning")
def test_main():
run_unittest(ExceptionClassTests, UsageTests)
if __name__ == '__main__':
test_main()
| apache-2.0 |
ahmed-mahran/hue | desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Part.py | 37 | 19486 | #._cv_part guppy.heapy.Part
class Format(object):
__slots__ = 'impl', 'mod'
def __init__(self, impl):
self.impl = impl
self.mod = impl.mod
def get_formatted_row(self, row):
fr = self.get_stat_data(row)
rows = []
rs = row.name.split('\n')
subsequent_indent = len(fr)*' '
rows.extend(self.mod.wrap(
fr+rs[0],
width=self.mod.line_length,
subsequent_indent=subsequent_indent))
for r in rs[1:]:
rows.extend(self.mod.wrap(
r,
width=self.mod.line_length,
initial_indent=subsequent_indent,
subsequent_indent=subsequent_indent))
return '\n'.join(rows)
def get_more_index(self, idx=None):
if idx is None:
idx = 0
idx += 10
return idx
def get_row_header(self):
impl = self.impl
if not (impl.count or impl.size):
return ''
sh = self.get_stat_header()
return self.mod.fill(
sh + self.impl.kindheader,
width=self.mod.line_length,
subsequent_indent=' '*len(sh))
def load_statrow_csk(self, r):
impl = self.impl
count, size, kind = r.split(' ', 2)
count = int(count)
size = int(size)
impl.cum_size += size
return StatRow(count, size, kind, impl.cur_index, impl.cum_size)
def load_statrow_sk(self, r):
impl = self.impl
size, kind = r.split(' ', 1)
size = int(size)
impl.cum_size += size
return StatRow(1, size, kind, impl.cur_index, impl.cum_size)
def ppob(self, ob, idx=None):
impl = self.impl
if idx is None:
label = self.get_label()
if label is not None:
print >>ob, label
idx = 0
if idx < 0:
            idx = impl.numrows + idx
it = impl.get_rows(idx)
print >>ob, self.get_row_header()
numrows = 0
for row in it:
form = self.get_formatted_row(row)
print >>ob, form
numrows += 1
if numrows >= 10:
nummore = impl.numrows - 1 - row.index
if nummore > 1:
print >>ob, \
"<%d more rows. Type e.g. '_.more' to view.>"%nummore
break
class SetFormat(Format):
__slots__ = ()
def get_label(self):
impl = self.impl
if impl.count != 1:
s = 's'
else:
s = ''
return 'Partition of a set of %d object%s. Total size = %d bytes.'%(
impl.count, s, impl.size)
def get_rowdata(self, row):
return '%d %d %s'%(row.count, row.size, row.name)
def get_stat_header(self):
return (
' Index Count % Size % Cumulative % ')
def get_stat_data(self, row):
format = '%6d %6d %3d %8d %3d %9d %3d '
impl = self.impl
fr = format % (
row.index,
row.count, int('%.0f'%(row.count * 100.0/impl.count)),
row.size, int('%.0f'%(row.size * 100.0/impl.size)),
row.cumulsize, int('%.0f'%(row.cumulsize * 100.0/impl.size)),
)
return fr
def load_statrow(self, r):
return self.load_statrow_csk(r)
class IdFormat(Format):
__slots__ = ()
def get_label(self):
impl = self.impl
if impl.count != 1:
s = 's'
else:
s = ''
return (
'Set of %d %s object%s. Total size = %d bytes.'%(
impl.count, impl.kindname, s, impl.size))
def get_rowdata(self, row):
return '%d %s'%(row.size, row.name)
def get_stat_header(self):
return (
' Index Size % Cumulative % ')
def get_stat_data(self, row):
impl = self.impl
format = '%6d %8d %5.1f %9d %5.1f '
fr = format % (
row.index,
row.size, (row.size * 100.0/impl.size),
row.cumulsize, row.cumulsize * 100.0/impl.size,
)
return fr
def load_statrow(self, r):
return self.load_statrow_sk(r)
class DiffFormat(Format):
__slots__ = ()
def _percent_of_b(self, size):
if self.impl.b_size != 0:
return '%9.3g'%(size*100.0/self.impl.b_size,)
else:
return ' (n.a.)'
def get_label(self):
impl = self.impl
x = (
'Summary of difference operation (A-B).\n'+
' Count Size\n'+
' A %6d %8d\n'%(impl.count+impl.b_count, impl.size+impl.b_size)+
' B %6d %8d\n'%(impl.b_count, impl.b_size)+
' A-B %6d %8d = %s %% of B\n'%(impl.count, impl.size, self._percent_of_b(impl.size)))
if impl.count or impl.size:
x += '\nDifferences by kind, largest absolute size diffs first.'
return x
def get_rowdata(self, row):
return '%d %d %s'%(row.count, row.size, row.name)
def get_stat_header(self):
return (
' Index Count Size Cumulative % of B ')
def get_stat_data(self, row):
impl = self.impl
format = '%6d %6d %8d %9d %s '
fr = format % (
row.index,
row.count,
row.size,
row.cumulsize,
self._percent_of_b(row.cumulsize),
)
return fr
def load_statrow(self, r):
return self.load_statrow_csk(r)
class StatRow(object):
__slots__ = 'count', 'size', 'name', 'index', 'cumulsize'
def __init__(self, count, size, name, index=None, cumulsize=None):
self.count = count
self.size = size
self.name = name
self.index = index
self.cumulsize = cumulsize
class PartRow(StatRow):
__slots__ = 'set', 'kind'
def __init__(self, count, size, name, index, cumulsize, set, kind):
self.count = count
self.size = size
self.name = name
self.index = index
self.cumulsize = cumulsize
self.set = set
self.kind = kind
class Stat:
def __init__(self, mod, get_trows, firstheader=''):
self.mod = mod
self._hiding_tag_ = mod._hiding_tag_
self.get_trows = get_trows
self.firstheader = firstheader
self.it = iter(get_trows())
self.cur_index = 0
self.cum_size = 0
self.rows = []
r = self.get_next()
while r and not r.startswith('.r:'):
name = r[1:r.index(':')]
value = r[r.index(':')+1:].strip()
try:
value = int(value)
except ValueError:
pass
setattr(self, name, value)
r = self.get_next()
self.format_name = self.format
self.format_class = getattr(self.mod, self.format)
self.format = self.format_class(self)
self.timemade = float(self.timemade)
def __getitem__(self, idx):
if isinstance(idx, (int, long)):
if idx < 0:
idx = self.numrows + idx
if not (0 <= idx < self.numrows):
raise IndexError, 'Stat index out of range.'
rows = [self.get_row(idx)]
elif isinstance(idx, slice):
start, stop, step = idx.indices(self.numrows)
rows = [self.get_row(idx) for idx in range(start, stop, step)]
else:
raise IndexError, 'Stat indices must be integers or slices.'
count = 0
size = 0
for r in rows:
count += r.count
size += r.size
trows = [
'.loader: _load_stat',
'.format: %s'%self.format_name,
'.timemade: %f'%self.timemade,
'.count: %d'%count,
'.size: %d'%size,
'.kindheader: %s'%self.kindheader,
'.kindname: %s'%self.kindname,
'.numrows: %d'%len(rows),
]
if getattr(self, 'b_count', None) is not None:
trows.append('.b_count: %d'%self.b_count)
trows.append('.b_size: %d'%self.b_size)
for r in rows:
trows.append('.r: %s'%self.format.get_rowdata(r))
return self.mod.load(trows)
def __len__(self):
return self.numrows
def __repr__(self):
ob = self.mod.output_buffer()
self.ppob(ob)
return self.firstheader + ob.getvalue().rstrip()
def __sub__(self, other):
if not isinstance(other, Stat):
raise TypeError, 'Can only take difference with other Stat instance.'
if self.kindheader != other.kindheader:
raise ValueError, 'Mismatching table kind header, %r vs %r.'%(
self.kindheader, other.kindheader)
rows = []
otab = {}
stab = {}
for r in other.get_rows():
o = otab.get(r.name)
if o:
otab[r.name] = StatRow(r.count+o.count, r.size+o.size, r.name, o.index, None)
else:
otab[r.name] = r
for r in self.get_rows():
o = otab.get(r.name)
if o:
del otab[r.name]
count = r.count - o.count
size = r.size - o.size
else:
count = r.count
size = r.size
if count == 0 and size == 0:
continue
sr = stab.get(r.name)
if sr:
sr.count += count
sr.size += size
else:
sr = StatRow(count, size, r.name)
stab[sr.name] = sr
rows.append(sr)
rs = otab.values()
rs.sort(lambda x,y:cmp(x.index, y.index)) # Preserve orig. order
for r in rs:
sr = StatRow(-r.count, -r.size, r.name)
assert sr.name not in stab
rows.append(sr)
rows.sort(lambda x,y:cmp(abs(y.size), abs(x.size)))
cumulcount = 0
cumulsize = 0
for r in rows:
cumulcount += r.count
cumulsize += r.size
r.cumulsize = cumulsize
trows = [
'.loader: _load_stat',
'.format: DiffFormat',
'.timemade: %f'%self.mod.time.time(),
'.b_count: %d'%other.count,
'.b_size: %d'%other.size,
'.count: %d'%cumulcount,
'.size: %d'%cumulsize,
'.kindheader: %s'%self.kindheader,
'.kindname: %s'%self.kindname,
'.numrows: %d'%len(rows),
]
for r in rows:
trows.append('.r: %d %d %s'%(r.count, r.size, r.name))
return self.mod.load(trows)
def dump(self, fn, mode='a'):
if not hasattr(fn, 'write'):
f = open(fn, mode)
else:
f = fn
try:
for r in self.get_trows():
if not r[-1:] == '\n':
r += '\n'
f.write(r)
end = '.end: .loader: %s\n'%self.loader
if r != end:
f.write(end)
finally:
if f is not fn:
f.close()
def _get_more(self):
return self.mod.basic_more_printer(self, self)
more = property(_get_more)
def get_more_index(self, idx=None):
return self.format.get_more_index(idx)
def get_next(self):
try:
r = self.it.next()
except StopIteration:
r = None
else:
r = r.rstrip('\n')
self.last = r
return r
def get_row(self, idx):
while idx >= len(self.rows):
self.parse_next_row()
return self.rows[idx]
def get_rows(self, idx = None):
if idx is None:
idx = 0
while idx < self.numrows:
try:
row = self.get_row(idx)
except IndexError:
return
else:
yield row
idx += 1
def get_rows_of_kinds(self, kinds):
# Return the rows with names in sequence kinds of unique names
# in that order. None if no such kind.
kindtab = {}
N = len(kinds)
res = [None] * len(kinds)
for i, kind in enumerate(kinds):
kindtab[kind] = i
assert len(kindtab) == N
n = 0
for row in self.get_rows():
idx = kindtab.get(row.name)
if idx is not None:
res[idx] = row
n += 1
if n >= N:
break
return res
def get_rows_n_and_other(self, N, sortby='Size'):
# Get N rows, the largest first
# mix in an '<Other>' row at a sorted position
# Size is either size if sortby = 'Size',
# or count if sortby = 'Count'.
# Returns a NEW LIST (caller may modify/sort it)
if sortby not in ('Size', 'Count'):
raise ValueError, "Argument 'sortby' must be 'Size' or 'Count'."
# Rows are already sorted by Size, largest first.
# If they want by Count, we need to resort them.
rows = self.get_rows()
if sortby == 'Count':
rows = list(rows)
rows.sort(lambda x, y: cmp(y.count, x.count))
retrows = []
cumulcount = 0
cumulsize = 0
for (i, r) in enumerate(rows):
if i >= N:
othercount = self.count - cumulcount
othersize = self.size - cumulsize
other = StatRow(othercount,
othersize,
'<Other>')
if sortby == 'Size':
for (i, r) in enumerate(retrows):
if r.size < othersize:
retrows[i:i] = [other]
break
else:
retrows.append(other)
elif sortby == 'Count':
for (i, r) in enumerate(retrows):
if r.count < othercount:
retrows[i:i] = [other]
break
else:
retrows.append(other)
else:
assert 0
break
cumulcount += r.count
cumulsize += r.size
retrows.append(r)
else:
assert cumulcount == self.count
assert cumulsize == self.size
return retrows
def parse_next_row(self):
r = self.last
if not r:
raise IndexError, 'Row index out of range.'
if r.startswith('.r: '):
r = r[4:]
sr = self.format.load_statrow(r)
self.cur_index += 1
self.rows.append(sr)
self.get_next()
return
elif r.startswith('.end'):
raise IndexError, 'Row index out of range.'
else:
raise SyntaxError
def ppob(self, ob, idx=None):
return self.format.ppob(ob, idx)
class Partition:
def __init__(self, mod, set, er):
self.mod = mod
self.set = set
self.er = er
self._hiding_tag_ = mod._hiding_tag_
self.timemade = mod.time.time()
def __iter__(self):
# The default iteration is over the sets
# To iterate over rows (if more info is needed), get_rows() is available.
return self.get_sets()
def get_more_index(self, idx=None):
return self.format.get_more_index(idx)
def get_rows(self, rowindex = None):
# Iterator over rows
if rowindex is None:
rowindex = 0
while 1:
try:
row = self.get_row(rowindex)
except IndexError:
return
else:
yield row
rowindex += 1
def get_set(self, index):
if isinstance(index, slice):
start, stop, step = index.indices(self.numrows)
ns = self.get_nodeset(start, stop, step)
return self.mod.idset(ns, er=self.er)
else:
if index < 0:
index += self.numrows
return self.get_rowset(index)
def get_sets(self, index=None):
for idx in range(self.numrows):
yield self.get_rowset(idx)
def get_stat(self):
# Avoid any references into the set!
trows = list(self.get_trows())
def get_trows():
return trows
return self.mod._load_stat(get_trows)
def get_trows(self):
yield '.loader: _load_stat'
yield '.format: %s'%self.format.__class__.__name__
yield '.timemade: %f'%self.timemade
yield '.count: %d'%self.count
yield '.size: %d'%self.size
yield '.kindname: %s'%self.kindname
yield '.kindheader: %s'%self.kindheader
yield '.numrows: %d'%self.numrows
for row in self.get_rows():
yield '.r: %s'%self.format.get_rowdata(row)
def init_format(self, FormatClass):
self.format = FormatClass(self)
def ppob(self, ob, idx=None):
return self.format.ppob(ob, idx)
class IdentityPartitionCluster(object):
# Contains objects of same size.
# to speed up management of identity partition
# - since otherwise we'd have to sort all the objects,
# on their string representation in worst case.
__slots__ = 'objects','locount','hicount','losize','obsize','issorted'
def __init__(self, objects, locount, count, losize, obsize):
self.objects = objects # tuple of objects in this segment
self.locount = locount # count BEFORE objects in this cluster
self.hicount = locount+count # count AFTER these objects
self.losize = losize # size BEFORE objects in this cluster
self.obsize = obsize # size of EACH object in this segment
self.issorted = False # indicates if .objects is sorted
class IdentityPartition(Partition):
def __init__(self, mod, set, er):
Partition.__init__(self, mod, set, er)
clusters = []
sizeclasses = mod.Size.classifier.partition_cli(set.nodes)
sizeclasses.sort()
sizeclasses.reverse()
totcount = 0
totsize = 0
for size, v in sizeclasses:
count = len(v)
clusters.append(IdentityPartitionCluster(
self.mod.observation_list(v), totcount, count, totsize, size))
totsize += size * count
totcount += count
assert totcount == set.count
self.cluidx = 0
self.clusters = clusters
self.count = totcount
self.kind = kind = set.byclodo.kind
self.kindheader = kind.fam.c_get_idpart_header(kind)
self.kindname = kind.fam.c_get_idpart_label(kind)
self.numrows = totcount
self.render = kind.fam.c_get_idpart_render(kind)
self.size = totsize
self.sortrender = kind.fam.c_get_idpart_sortrender(kind)
self.init_format(IdFormat)
def get_nodeset(self, start, stop, step):
return self.get_nodeset_cluster(start, stop, step)[0]
def get_nodeset_cluster(self, start, stop, step):
if step <= 0:
raise ValueError, 'Step must be positive.'
ns = self.mod.mutnodeset()
if start >= stop:
return (ns, None)
clusters = self.clusters
lo = 0
hi = len(clusters)
cluidx = self.cluidx
while lo < hi:
clu = clusters[cluidx]
if clu.locount <= start:
if start < clu.hicount:
break
else:
lo = cluidx + 1
else:
hi = cluidx
cluidx = (lo + hi) // 2
else:
return (ns, None)
clu_to_return = clu
while 1:
objects = clu.objects
if start != clu.locount or stop < clu.hicount or step != 1:
if not clu.issorted:
sortrender = self.sortrender
if sortrender == 'IDENTITY':
ks = objects
else:
ks = [sortrender(x) for x in objects]
ks = [(kind, i) for i, kind in enumerate(ks)]
ks.sort()
clu.objects = objects = self.mod.observation_list(
[objects[i] for (kind, i) in ks])
clu.issorted = True
objects = objects[start-clu.locount:stop-clu.locount:step]
ns |= objects
self.cluidx = cluidx # memo till next call
start += len(objects)*step
if start >= stop:
break
for cluidx in range(cluidx + 1, len(clusters)):
clu = clusters[cluidx]
if clu.locount <= start < clu.hicount:
break
else:
break
return (ns, clu_to_return)
def get_row(self, rowidx):
ns, clu = self.get_nodeset_cluster(rowidx, rowidx+1, 1)
if not ns:
raise IndexError, 'Partition index out of range.'
vi = self.mod.idset(ns, er=self.er)
row = PartRow(1, clu.obsize, self.render(vi.theone),
rowidx, (rowidx+1-clu.locount)*clu.obsize + clu.losize,
vi, vi.kind)
return row
def get_rowset(self, rowidx):
ns = self.get_nodeset(rowidx, rowidx+1, 1)
if not ns:
raise IndexError, 'Partition index out of range.'
return self.mod.idset(ns, er=self.er)
class SetPartition(Partition):
def __init__(self, mod, set, er):
Partition.__init__(self, mod, set, er)
classifier = er.classifier
tosort = [(-part.size, classifier.get_tabrendering(kind, ''), kind, part)
for (kind, part) in classifier.partition(set.nodes)]
tosort.sort()
cumulsize = 0
rows = []
for (minusize, name, kind, part) in tosort:
size = -minusize
cumulsize += size
# assert size == part.size
rows.append(PartRow(
part.count, size, name,
len(rows), cumulsize,
part, kind))
# No check. Sizes may change. Note feb 8 2006.
#assert cumulsize == set.size
self.count = set.count
self.kindheader = classifier.get_tabheader('')
self.kindname = ''
self.numrows = len(rows)
self.rows = rows
self.size = cumulsize
self.init_format(SetFormat)
def get_nodeset(self, start, stop, step):
if step <= 0:
raise ValueError, 'Step must be positive.'
ns = self.mod.mutnodeset()
while start < stop:
ns |= self.rows[start].set.nodes
start += step
return ns
def get_row(self, idx):
try:
return self.rows[idx]
except IndexError:
raise IndexError, 'Partition index out of range.'
def get_rowset(self, idx):
return self.get_row(idx).set
class _GLUECLAMP_:
_preload_ = ('_hiding_tag_',)
_chgable_ = ('line_length', 'backup_suffix')
_imports_ = (
'_parent.OutputHandling:output_buffer',
'_parent.OutputHandling:basic_more_printer',
'_parent.ImpSet:mutnodeset',
'_parent.Use:Id',
'_parent.Use:Size',
'_parent.Use:idset',
'_parent.Use:load',
'_parent.View:_hiding_tag_',
'_parent.View:observation_list',
'_root.os:rename',
'_root.textwrap:fill',
'_root.textwrap:wrap',
'_root.textwrap:wrap',
'_root:time',
)
# 'Config'
line_length = 100
backup_suffix = '.old'
# Factory method
def partition(self, set, er):
if er.classifier is self.Id.classifier:
return IdentityPartition(self, set, er)
else:
return SetPartition(self, set, er)
# Private - Use.load is intended to be used directly.
def _load_stat(self, get_trows):
return Stat(self, get_trows)
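# A rough usage sketch (illustrative only): this module is normally reached
# through guppy's top-level interface rather than imported directly, and the
# calls below are an assumption about typical heapy usage, not taken from
# this file.
#
#   from guppy import hpy
#   h = hpy()
#   heap = h.heap()              # a set of live objects
#   print heap                   # repr renders a partition table like the ones above
#   heap.stat.dump('heap.stat')  # persist a Stat snapshot for later loading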
| apache-2.0 |
yfried/ansible | test/units/plugins/action/test_raw.py | 45 | 3763 | # (c) 2016, Saran Ahluwalia <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleActionFail
from units.compat import unittest
from units.compat.mock import patch, MagicMock, Mock
from ansible.plugins.action.raw import ActionModule
from ansible.playbook.task import Task
from ansible.plugins.loader import connection_loader
play_context = Mock()
play_context.shell = 'sh'
connection = connection_loader.get('local', play_context, os.devnull)
class TestCopyResultExclude(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
    # The current behavior of the raw action in regards to executable is currently in question;
    # the test_raw_executable_is_not_empty_string verifies the current behavior (whether it is desired or not).
# Please refer to the following for context:
# Issue: https://github.com/ansible/ansible/issues/16054
# PR: https://github.com/ansible/ansible/pull/16085
def test_raw_executable_is_not_empty_string(self):
task = MagicMock(Task)
task.async_val = False
task.args = {'_raw_params': 'Args1'}
play_context.check_mode = False
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
self.mock_am._low_level_execute_command = Mock(return_value={})
self.mock_am.display = Mock()
self.mock_am._admin_users = ['root', 'toor']
self.mock_am.run()
self.mock_am._low_level_execute_command.assert_called_with('Args1', executable=False)
def test_raw_check_mode_is_True(self):
task = MagicMock(Task)
task.async_val = False
task.args = {'_raw_params': 'Args1'}
play_context.check_mode = True
try:
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
except AnsibleActionFail:
pass
def test_raw_test_environment_is_None(self):
task = MagicMock(Task)
task.async_val = False
task.args = {'_raw_params': 'Args1'}
task.environment = None
play_context.check_mode = False
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
self.mock_am._low_level_execute_command = Mock(return_value={})
self.mock_am.display = Mock()
self.assertEqual(task.environment, None)
def test_raw_task_vars_is_not_None(self):
task = MagicMock(Task)
task.async_val = False
task.args = {'_raw_params': 'Args1'}
task.environment = None
play_context.check_mode = False
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
self.mock_am._low_level_execute_command = Mock(return_value={})
self.mock_am.display = Mock()
self.mock_am.run(task_vars={'a': 'b'})
self.assertEqual(task.environment, None)
| gpl-3.0 |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractCurrentlyTLingBuniMi.py | 1 | 1148 | def extractCurrentlyTLingBuniMi(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if item['title'].startswith('[BNM]'):
return buildReleaseMessageWithType(item, 'Bu ni Mi wo Sasagete Hyaku to Yonen. Elf de Yarinaosu Musha Shugyou', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[DD]'):
return buildReleaseMessageWithType(item, 'Doll Dungeon', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[HCLS]'):
return buildReleaseMessageWithType(item, 'High Comprehension Low Strength', vol, chp, frag=frag, postfix=postfix)
tagmap = [
('Abyss Domination', 'Abyss Domination', 'translated'),
('Nine Yang Sword Saint', 'Nine Yang Sword Saint', 'translated'),
('Mysterious World Beast God', 'Mysterious World Beast God', 'translated'),
]
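	# Fall back to tag-based matching: if the feed item carries one of the tags above,
	# emit a release message for the mapped series with the listed translation type.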
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | bsd-3-clause |
Dhivyap/ansible | lib/ansible/modules/web_infrastructure/taiga_issue.py | 47 | 10952 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Alejandro Guirao <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: taiga_issue
short_description: Creates/deletes an issue in a Taiga Project Management Platform
description:
- Creates/deletes an issue in a Taiga Project Management Platform (U(https://taiga.io)).
- An issue is identified by the combination of project, issue subject and issue type.
- This module implements the creation or deletion of issues (not the update).
version_added: "2.0"
options:
taiga_host:
description:
- The hostname of the Taiga instance.
default: https://api.taiga.io
project:
description:
- Name of the project containing the issue. Must exist previously.
required: True
subject:
description:
- The issue subject.
required: True
issue_type:
description:
- The issue type. Must exist previously.
required: True
priority:
description:
- The issue priority. Must exist previously.
default: Normal
status:
description:
- The issue status. Must exist previously.
default: New
severity:
description:
- The issue severity. Must exist previously.
default: Normal
description:
description:
- The issue description.
default: ""
attachment:
description:
- Path to a file to be attached to the issue.
attachment_description:
description:
- A string describing the file to be attached to the issue.
default: ""
tags:
description:
      - A list of tags to be assigned to the issue.
default: []
state:
description:
- Whether the issue should be present or not.
choices: ["present", "absent"]
default: present
author: Alejandro Guirao (@lekum)
requirements: [python-taiga]
notes:
- The authentication is achieved either by the environment variable TAIGA_TOKEN or by the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD
'''
EXAMPLES = '''
# Create an issue in the my hosted Taiga environment and attach an error log
- taiga_issue:
taiga_host: https://mytaigahost.example.com
project: myproject
subject: An error has been found
issue_type: Bug
priority: High
status: New
severity: Important
description: An error has been found. Please check the attached error log for details.
attachment: /path/to/error.log
attachment_description: Error log file
tags:
- Error
- Needs manual check
state: present
# Deletes the previously created issue
- taiga_issue:
taiga_host: https://mytaigahost.example.com
project: myproject
subject: An error has been found
issue_type: Bug
state: absent
'''
RETURN = '''# '''
import traceback
from os import getenv
from os.path import isfile
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
TAIGA_IMP_ERR = None
try:
from taiga import TaigaAPI
from taiga.exceptions import TaigaException
TAIGA_MODULE_IMPORTED = True
except ImportError:
TAIGA_IMP_ERR = traceback.format_exc()
TAIGA_MODULE_IMPORTED = False
def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority,
issue_status, issue_type, issue_severity, issue_description,
issue_attachment, issue_attachment_description,
issue_tags, state, check_mode=False):
"""
    Method that creates/deletes issues depending on whether they exist and the desired state
The credentials should be passed via environment variables:
- TAIGA_TOKEN
- TAIGA_USERNAME and TAIGA_PASSWORD
Returns a tuple with these elements:
- A boolean representing the success of the operation
- A descriptive message
- A dict with the issue attributes, in case of issue creation, otherwise empty dict
"""
changed = False
try:
token = getenv('TAIGA_TOKEN')
if token:
api = TaigaAPI(host=taiga_host, token=token)
else:
api = TaigaAPI(host=taiga_host)
username = getenv('TAIGA_USERNAME')
password = getenv('TAIGA_PASSWORD')
if not any([username, password]):
return (False, changed, "Missing credentials", {})
api.auth(username=username, password=password)
user_id = api.me().id
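        # NOTE: the filter()/len() pattern used below assumes Python 2 semantics,
        # where filter() returns a list; under Python 3 it returns an iterator.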
project_list = filter(lambda x: x.name == project_name, api.projects.list(member=user_id))
if len(project_list) != 1:
return (False, changed, "Unable to find project %s" % project_name, {})
project = project_list[0]
project_id = project.id
priority_list = filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id))
if len(priority_list) != 1:
return (False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {})
priority_id = priority_list[0].id
status_list = filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id))
if len(status_list) != 1:
return (False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {})
status_id = status_list[0].id
type_list = filter(lambda x: x.name == issue_type, project.list_issue_types())
if len(type_list) != 1:
return (False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {})
type_id = type_list[0].id
severity_list = filter(lambda x: x.name == issue_severity, project.list_severities())
if len(severity_list) != 1:
return (False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {})
severity_id = severity_list[0].id
issue = {
"project": project_name,
"subject": issue_subject,
"priority": issue_priority,
"status": issue_status,
"type": issue_type,
"severity": issue_severity,
"description": issue_description,
"tags": issue_tags,
}
# An issue is identified by the project_name, the issue_subject and the issue_type
matching_issue_list = filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues())
matching_issue_list_len = len(matching_issue_list)
if matching_issue_list_len == 0:
# The issue does not exist in the project
if state == "present":
# This implies a change
changed = True
if not check_mode:
# Create the issue
new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, description=issue_description)
if issue_attachment:
new_issue.attach(issue_attachment, description=issue_attachment_description)
issue["attachment"] = issue_attachment
issue["attachment_description"] = issue_attachment_description
return (True, changed, "Issue created", issue)
else:
# If does not exist, do nothing
return (True, changed, "Issue does not exist", {})
elif matching_issue_list_len == 1:
# The issue exists in the project
if state == "absent":
# This implies a change
changed = True
if not check_mode:
# Delete the issue
matching_issue_list[0].delete()
return (True, changed, "Issue deleted", {})
else:
# Do nothing
return (True, changed, "Issue already exists", {})
else:
# More than 1 matching issue
return (False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {})
except TaigaException as exc:
msg = "An exception happened: %s" % to_native(exc)
return (False, changed, msg, {})
def main():
module = AnsibleModule(
argument_spec=dict(
taiga_host=dict(required=False, default="https://api.taiga.io"),
project=dict(required=True),
subject=dict(required=True),
issue_type=dict(required=True),
priority=dict(required=False, default="Normal"),
status=dict(required=False, default="New"),
severity=dict(required=False, default="Normal"),
description=dict(required=False, default=""),
attachment=dict(required=False, default=None),
attachment_description=dict(required=False, default=""),
tags=dict(required=False, default=[], type='list'),
state=dict(required=False, choices=['present', 'absent'],
default='present'),
),
supports_check_mode=True
)
if not TAIGA_MODULE_IMPORTED:
module.fail_json(msg=missing_required_lib("python-taiga"),
exception=TAIGA_IMP_ERR)
taiga_host = module.params['taiga_host']
project_name = module.params['project']
issue_subject = module.params['subject']
issue_priority = module.params['priority']
issue_status = module.params['status']
issue_type = module.params['issue_type']
issue_severity = module.params['severity']
issue_description = module.params['description']
issue_attachment = module.params['attachment']
issue_attachment_description = module.params['attachment_description']
if issue_attachment:
if not isfile(issue_attachment):
msg = "%s is not a file" % issue_attachment
module.fail_json(msg=msg)
issue_tags = module.params['tags']
state = module.params['state']
return_status, changed, msg, issue_attr_dict = manage_issue(
module,
taiga_host,
project_name,
issue_subject,
issue_priority,
issue_status,
issue_type,
issue_severity,
issue_description,
issue_attachment,
issue_attachment_description,
issue_tags,
state,
check_mode=module.check_mode
)
if return_status:
if len(issue_attr_dict) > 0:
module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict)
else:
module.exit_json(changed=changed, msg=msg)
else:
module.fail_json(msg=msg)
if __name__ == '__main__':
main()
| gpl-3.0 |
OptiPop/external_chromium_org_third_party_skia | gm/rename_config.py | 20 | 3431 | #!/usr/bin/python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to rename a config in some subset of our GM expectation files.
Created for http://skbug.com/2752 ('split existing "gpu" GM results into "gl"
and "gles"')
Run with -h to see usage.
Example command lines:
rename_config.py gpu gles '.*Android.*'
TODO(epoger): Once https://codereview.chromium.org/397103003/ is committed,
we should add a unittest. Until then, we can test this as follows:
OLD=expectations/gm && NEW=/tmp/expectations && \
rm -rf $NEW && \
cp -a $OLD $NEW && \
gm/rename_config.py msaa4 gles-msaa4 '.*Android.*' \
--expectations-root $NEW && \
diff --recursive $OLD $NEW
"""
__author__ = 'Elliot Poger'
import argparse
import os
import re
import gm_json
DEFAULT_EXPECTATIONS_ROOT = os.path.join(
os.path.dirname(__file__), os.pardir, 'expectations', 'gm')
IMAGE_FILENAME_RE = re.compile(gm_json.IMAGE_FILENAME_PATTERN)
class Renamer(object):
def __init__(self, args):
"""
Params:
args: the Namespace object generated by argparse.parse_args()
"""
self._args = args
def run(self):
"""Perform all the subsitutions."""
for path in self._get_file_list():
self._rename_config(path=path,
old=self._args.old_config_name,
new=self._args.new_config_name)
def _rename_config(self, path, old, new):
"""Renames all instances of a config within a GM expectations file.
Params:
path: path to file which will be modified in place
old: old config name
new: new config name
"""
dic = gm_json.LoadFromFile(file_path=path)
expected_results = dic[gm_json.JSONKEY_EXPECTEDRESULTS]
orig_keys = expected_results.keys()
for key in orig_keys:
result = expected_results.pop(key)
(testname, config) = IMAGE_FILENAME_RE.match(key).groups()
if config == old:
config = new
key = '%s_%s.png' % (testname, config)
expected_results[key] = result
gm_json.WriteToFile(json_dict=dic, file_path=path)
def _get_file_list(self):
"""Returns the list of files we want to operate on (the complete path
to each file)."""
root = self._args.expectations_root
regex = re.compile(self._args.builder_name_pattern)
return [os.path.join(root, builder, 'expected-results.json')
for builder in os.listdir(root)
if regex.match(builder)]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('old_config_name',
help=('Config name we want to replace.'))
parser.add_argument('new_config_name',
help=('Config name we want to replace the old one with.'))
parser.add_argument('builder_name_pattern',
help=('Regex pattern describing which builders we want '
'to make the substitution for; \'.*\' to perform '
'the replacement on all builders.'))
parser.add_argument('--expectations-root',
default=DEFAULT_EXPECTATIONS_ROOT,
help=('Root of the GM expectations dir; defaults to '
'%(default)s'))
args = parser.parse_args()
renamer = Renamer(args)
renamer.run()
if __name__ == '__main__':
main()
| bsd-3-clause |
ychfan/tensorflow | tensorflow/contrib/distributions/python/ops/vector_exponential_linear_operator.py | 10 | 10517 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vectorized Exponential distribution class, directly using LinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import exponential
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
__all__ = ["VectorExponentialLinearOperator"]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
class VectorExponentialLinearOperator(
transformed_distribution.TransformedDistribution):
"""The vectorization of the Exponential distribution on `R^k`.
The vector exponential distribution is defined over a subset of `R^k`, and
parameterized by a (batch of) length-`k` `loc` vector and a (batch of) `k x k`
`scale` matrix: `covariance = scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is
```none
pdf(y; loc, scale) = exp(-||x||_1) / Z, for y in S(loc, scale),
x = inv(scale) @ (y - loc),
Z = |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `S = {loc + scale @ x : x in R^k, x_1 > 0, ..., x_k > 0}`, is an image of
the positive half-space,
* `||x||_1` denotes the `l1` norm of `x`, `sum_i |x_i|`,
* `Z` denotes the normalization constant.
The VectorExponential distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Exponential(rate=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorExponential` and `Vector` distributions in TensorFlow.
The `VectorExponential` is a non-standard distribution that has useful
properties.
The marginals `Y_1, ..., Y_k` are *not* Exponential random variables, due to
the fact that the sum of Exponential random variables is not Exponential.
Instead, `Y` is a vector whose components are linear combinations of
Exponential random variables. Thus, `Y` lives in the vector space generated
by `vectors` of Exponential distributions. This allows the user to decide the
mean and covariance (by setting `loc` and `scale`), while preserving some
properties of the Exponential distribution. In particular, the tails of `Y_i`
will be (up to polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Exponential random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
ds = tf.contrib.distributions
la = tf.linalg
# Initialize a single 2-variate VectorExponential, supported on
# {(x, y) in R^2 : x > 0, y > 0}.
mat = [[1.0, 0.1],
[0.1, 1.0]]
vex = ds.VectorExponentialLinearOperator(
scale=la.LinearOperatorFullMatrix(mat))
  # Compute the pdf of an `R^2` observation; return a scalar.
vex.prob([1., 2.]).eval() # shape: []
# Initialize a 2-batch of 3-variate Vector Exponential's.
mu = [[1., 2, 3],
[1., 0, 0]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
vex = ds.VectorExponentialLinearOperator(
loc=mu,
scale=la.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[1.9, 2.2, 3.1],
[10., 1.0, 9.0]] # shape: [2, 3]
vex.prob(x).eval() # shape: [2]
```
"""
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name="VectorExponentialLinearOperator"):
"""Construct Vector Exponential distribution supported on a subset of `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = locals()
if scale is None:
raise ValueError("Missing required `scale` parameter.")
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
with ops.name_scope(name, values=[loc] + scale.graph_parents):
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
super(VectorExponentialLinearOperator, self).__init__(
distribution=exponential.Exponential(rate=array_ops.ones(
[], dtype=scale.dtype), allow_nan_stats=allow_nan_stats),
bijector=bijectors.AffineLinearOperator(
shift=loc, scale=scale, validate_args=validate_args),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self.bijector.shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self.bijector.scale
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(VectorExponentialLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(VectorExponentialLinearOperator, self)._prob(x)
def _mean(self):
# Let
# W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
# Then this distribution is
# X = loc + LW,
# and then E[X] = loc + L1, where 1 is the vector of ones.
scale_x_ones = self.bijector.scale.matvec(
array_ops.ones(self._mode_mean_shape(), self.dtype))
if self.loc is None:
return scale_x_ones
return array_ops.identity(self.loc) + scale_x_ones
def _covariance(self):
# Let
# W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
# Then this distribution is
# X = loc + LW,
# and then since Cov(wi, wj) = 1 if i=j, and 0 otherwise,
# Cov(X) = L Cov(W W^T) L^T = L L^T.
if distribution_util.is_diagonal_scale(self.scale):
return array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
else:
return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense()))
else:
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return math_ops.sqrt(
array_ops.matrix_diag_part(self.scale.matmul(self.scale.to_dense())))
else:
return math_ops.sqrt(
array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
def _mode(self):
scale_x_zeros = self.bijector.scale.matvec(
array_ops.zeros(self._mode_mean_shape(), self.dtype))
if self.loc is None:
return scale_x_zeros
return array_ops.identity(self.loc) + scale_x_zeros
def _mode_mean_shape(self):
"""Shape for the mode/mean Tensors."""
shape = self.batch_shape.concatenate(self.event_shape)
has_static_shape = shape.is_fully_defined()
if not has_static_shape:
shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
return shape
| apache-2.0 |
yfried/ansible | lib/ansible/modules/monitoring/icinga2_host.py | 35 | 9960 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This module is proudly sponsored by CGI (www.cgi.com) and
# KPN (www.kpn.com).
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: icinga2_host
short_description: Manage a host in Icinga2
description:
- "Add or remove a host to Icinga2 through the API."
- "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)"
version_added: "2.5"
author: "Jurgen Brand (@t794104)"
options:
url:
description:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
required: true
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in
an environment variable on the target hosts.
type: bool
default: 'yes'
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
url_username:
description:
- The username for use in HTTP basic authentication.
- This parameter can be used without C(url_password) for sites that allow empty passwords.
url_password:
description:
- The password for use in HTTP basic authentication.
- If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
force_basic_auth:
description:
- httplib2, the library used by the uri module only sends authentication information when a webservice
responds to an initial request with a 401 status. Since some basic auth services do not properly
send a 401, logins will fail. This option forces the sending of the Basic authentication header
upon initial request.
type: bool
default: 'no'
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client
authentication. This file can also include the key as well, and if
the key is included, C(client_key) is not required.
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL
client authentication. If C(client_cert) contains both the certificate
and key, this option is not required.
state:
description:
- Apply feature state.
choices: [ "present", "absent" ]
default: present
name:
description:
      - Name used to create / delete the host. This does not need to be the FQDN, but does need to be unique.
required: true
zone:
description:
- The zone from where this host should be polled.
template:
description:
- The template used to define the host.
- Template cannot be modified after object creation.
check_command:
description:
- The command used to check if the host is alive.
default: "hostalive"
display_name:
description:
- The name used to display the host.
    default: if none is given it is the value of the <name> parameter
ip:
description:
- The IP address of the host.
required: true
variables:
description:
- List of variables.
'''
EXAMPLES = '''
- name: Add host to icinga
icinga2_host:
url: "https://icinga2.example.com"
url_username: "ansible"
url_password: "a_secret"
state: present
name: "{{ ansible_fqdn }}"
ip: "{{ ansible_default_ipv4.address }}"
delegate_to: 127.0.0.1
'''
RETURN = '''
name:
description: The name used to create, modify or delete the host
type: string
returned: always
data:
description: The data structure used for create, modify or delete of the host
type: dict
returned: always
'''
import json
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url, url_argument_spec
# ===========================================
# Icinga2 API class
#
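# Thin wrapper around Ansible's fetch_url(): every request sends a JSON 'Accept'
# header plus an 'X-HTTP-Method-Override' header, so the intended verb travels in a
# header alongside the HTTP method actually used on the wire.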
class icinga2_api:
module = None
def call_url(self, path, data='', method='GET'):
headers = {
'Accept': 'application/json',
'X-HTTP-Method-Override': method,
}
url = self.module.params.get("url") + "/" + path
rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method)
body = ''
if rsp:
body = json.loads(rsp.read())
if info['status'] >= 400:
body = info['body']
return {'code': info['status'], 'data': body}
def check_connection(self):
ret = self.call_url('v1/status')
if ret['code'] == 200:
return True
return False
def exists(self, hostname):
data = {
"filter": "match(\"" + hostname + "\", host.name)",
}
ret = self.call_url(
path="v1/objects/hosts",
data=self.module.jsonify(data)
)
if ret['code'] == 200:
if len(ret['data']['results']) == 1:
return True
return False
def create(self, hostname, data):
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
data=self.module.jsonify(data),
method="PUT"
)
return ret
def delete(self, hostname):
data = {"cascade": 1}
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
data=self.module.jsonify(data),
method="DELETE"
)
return ret
def modify(self, hostname, data):
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
data=self.module.jsonify(data),
method="POST"
)
return ret
def diff(self, hostname, data):
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
method="GET"
)
changed = False
ic_data = ret['data']['results'][0]
for key in data['attrs']:
if key not in ic_data['attrs'].keys():
changed = True
elif data['attrs'][key] != ic_data['attrs'][key]:
changed = True
return changed
# ===========================================
# Module execution.
#
def main():
# use the predefined argument spec for url
argument_spec = url_argument_spec()
# remove unnecessary argument 'force'
del argument_spec['force']
# add our own arguments
argument_spec.update(
state=dict(default="present", choices=["absent", "present"]),
name=dict(required=True, aliases=['host']),
zone=dict(),
template=dict(default=None),
check_command=dict(default="hostalive"),
display_name=dict(default=None),
ip=dict(required=True),
variables=dict(type='dict', default=None),
)
# Define the main module
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
state = module.params["state"]
name = module.params["name"]
zone = module.params["zone"]
template = []
template.append(name)
if module.params["template"]:
template.append(module.params["template"])
check_command = module.params["check_command"]
ip = module.params["ip"]
display_name = module.params["display_name"]
if not display_name:
display_name = name
variables = module.params["variables"]
try:
icinga = icinga2_api()
icinga.module = module
icinga.check_connection()
except Exception as e:
module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e))
data = {
'attrs': {
'address': ip,
'display_name': display_name,
'check_command': check_command,
'zone': zone,
'vars': {
'made_by': "ansible",
},
'templates': template,
}
}
if variables:
data['attrs']['vars'].update(variables)
changed = False
if icinga.exists(name):
if state == "absent":
if module.check_mode:
module.exit_json(changed=True, name=name, data=data)
else:
try:
ret = icinga.delete(name)
if ret['code'] == 200:
changed = True
else:
module.fail_json(msg="bad return code deleting host: %s" % (ret['data']))
except Exception as e:
module.fail_json(msg="exception deleting host: " + str(e))
elif icinga.diff(name, data):
if module.check_mode:
module.exit_json(changed=False, name=name, data=data)
# Template attribute is not allowed in modification
del data['attrs']['templates']
ret = icinga.modify(name, data)
if ret['code'] == 200:
changed = True
else:
module.fail_json(msg="bad return code modifying host: %s" % (ret['data']))
else:
if state == "present":
if module.check_mode:
changed = True
else:
try:
ret = icinga.create(name, data)
if ret['code'] == 200:
changed = True
else:
module.fail_json(msg="bad return code creating host: %s" % (ret['data']))
except Exception as e:
module.fail_json(msg="exception creating host: " + str(e))
module.exit_json(changed=changed, name=name, data=data)
# import module snippets
if __name__ == '__main__':
main()
| gpl-3.0 |
xcgspring/AXUI | test/test_driver/windows/test_Translater.py | 1 | 1731 |
import sys
import unittest
class TestTranslater(unittest.TestCase):
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_coordinate_identifier(self):
import AXUI.driver.windows.Translater as translater
from AXUI.parsing.identifier_parsing import identifier_lexer, identifier_parser
identifier = "Coordinate = '(12 ,34, 56, 79)'"
parsed_identifier = identifier_parser.parse(identifier, lexer=identifier_lexer)
translated_identifier = translater.ID_Translater(parsed_identifier).get_translated()
print(translated_identifier)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_index_identifier(self):
import AXUI.driver.windows.Translater as translater
from AXUI.parsing.identifier_parsing import identifier_lexer, identifier_parser
identifier = "Name='menu bar' AND Index=3"
parsed_identifier = identifier_parser.parse(identifier, lexer=identifier_lexer)
translated_identifier = translater.ID_Translater(parsed_identifier).get_translated()
print(translated_identifier)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_UIA_identifier(self):
import AXUI.driver.windows.Translater as translater
from AXUI.parsing.identifier_parsing import identifier_lexer, identifier_parser
identifier = "Name='menu bar' AND LocalizedControlType='menu bar'"
parsed_identifier = identifier_parser.parse(identifier, lexer=identifier_lexer)
translated_identifier = translater.ID_Translater(parsed_identifier).get_translated()
print(translated_identifier)
| apache-2.0 |
christiansandberg/canopen | test/test_emcy.py | 1 | 2212 | import unittest
from canopen import emcy
class TestEmcyConsumer(unittest.TestCase):
def test_emcy_list(self):
emcy_node = emcy.EmcyConsumer()
emcy_node.on_emcy(0x81, b'\x01\x20\x02\x00\x01\x02\x03\x04', 1473418396.0)
emcy_node.on_emcy(0x81, b'\x10\x90\x01\x00\x01\x02\x03\x04', 1473418397.0)
self.assertEqual(len(emcy_node.log), 2)
self.assertEqual(len(emcy_node.active), 2)
error = emcy_node.log[0]
self.assertIsInstance(error, emcy.EmcyError)
self.assertIsInstance(error, Exception)
self.assertEqual(error.code, 0x2001)
self.assertEqual(error.register, 0x02)
self.assertEqual(error.data, b'\x00\x01\x02\x03\x04')
self.assertAlmostEqual(error.timestamp, 1473418396.0)
self.assertEqual(emcy_node.active[0], error)
error = emcy_node.log[1]
self.assertEqual(error.code, 0x9010)
self.assertEqual(error.register, 0x01)
self.assertEqual(error.data, b'\x00\x01\x02\x03\x04')
self.assertAlmostEqual(error.timestamp, 1473418397.0)
self.assertEqual(emcy_node.active[1], error)
emcy_node.on_emcy(0x81, b'\x00\x00\x00\x00\x00\x00\x00\x00', 1473418397.0)
self.assertEqual(len(emcy_node.log), 3)
self.assertEqual(len(emcy_node.active), 0)
def test_str(self):
error = emcy.EmcyError(0x2001, 0x02, b'\x00\x01\x02\x03\x04', 1473418396.0)
self.assertEqual(str(error), "Code 0x2001, Current")
error = emcy.EmcyError(0x50FF, 0x01, b'\x00\x01\x02\x03\x04', 1473418396.0)
self.assertEqual(str(error), "Code 0x50FF, Device Hardware")
error = emcy.EmcyError(0x7100, 0x01, b'\x00\x01\x02\x03\x04', 1473418396.0)
self.assertEqual(str(error), "Code 0x7100")
class MockNetwork(object):
data = None
def send_message(self, can_id, data):
self.data = data
class TestEmcyProducer(unittest.TestCase):
def test_send(self):
network = MockNetwork()
emcy_node = emcy.EmcyProducer(0x80 + 1)
emcy_node.network = network
emcy_node.send(0x2001, 0x2, b'\x00\x01\x02\x03\x04')
self.assertEqual(network.data, b'\x01\x20\x02\x00\x01\x02\x03\x04')
| mit |
hlin117/scikit-learn | doc/tutorial/machine_learning_map/svg2imagemap.py | 360 | 3411 | #!/usr/local/bin/python
"""
This script converts a subset of SVG into an HTML imagemap
Note *subset*. It only handles <path> elements, for which it only pays
attention to the M and L commands. Futher, it only notices the "translate"
transform.
It was written to generate the examples in the documentation for maphilight,
and thus is very squarely aimed at handling several SVG maps from wikipedia.
It *assumes* that all the <path>s it will need are inside a <g>. Any <path>
outside of a <g> will be ignored.
It takes several possible arguments, in the form:
    $ svg2imagemap.py FILENAME [x y [group1 group2 ... groupN]]
FILENAME must be the name of an SVG file. All other arguments are optional.
x and y, if present, are the dimensions of the image you'll be creating from
the SVG. If not present, it assumes the values of the width and height
attributes in the SVG file.
group1 through groupN are group ids. If you only want particular groups used,
enter their ids here and all others will be ignored.
"""
import os
import re
import sys
import xml.dom.minidom
import parse_path
if len(sys.argv) == 1:
sys.exit("svn2imagemap.py FILENAME [x y [group1 group2 ... groupN]]")
if not os.path.exists(sys.argv[1]):
sys.exit("Input file does not exist")
x, y, groups = None, None, None
if len(sys.argv) >= 3:
x = float(sys.argv[2])
y = float(sys.argv[3])
if len(sys.argv) > 3:
groups = sys.argv[4:]
svg_file = xml.dom.minidom.parse(sys.argv[1])
svg = svg_file.getElementsByTagName('svg')[0]
raw_width = float(svg.getAttribute('width'))
raw_height = float(svg.getAttribute('height'))
width_ratio = x and (x / raw_width) or 1
height_ratio = y and (y / raw_height) or 1
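# If explicit output dimensions were supplied, every coordinate is scaled by these
# ratios; otherwise the ratios stay at 1 and the SVG's native coordinates are kept.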
if groups:
elements = [g for g in svg.getElementsByTagName('g') if (g.hasAttribute('id') and g.getAttribute('id') in groups)]
elements.extend([p for p in svg.getElementsByTagName('path') if (p.hasAttribute('id') and p.getAttribute('id') in groups)])
else:
elements = svg.getElementsByTagName('g')
parsed_groups = {}
for e in elements:
paths = []
if e.nodeName == 'g':
for path in e.getElementsByTagName('path'):
points = parse_path.get_points(path.getAttribute('d'))
for pointset in points:
paths.append([path.getAttribute('id'), pointset])
else:
points = parse_path.get_points(e.getAttribute('d'))
for pointset in points:
paths.append([e.getAttribute('id'), pointset])
if e.hasAttribute('transform'):
print e.getAttribute('id'), e.getAttribute('transform')
for transform in re.findall(r'(\w+)\((-?\d+.?\d*),(-?\d+.?\d*)\)', e.getAttribute('transform')):
if transform[0] == 'translate':
x_shift = float(transform[1])
y_shift = float(transform[2])
for path in paths:
path[1] = [(p[0] + x_shift, p[1] + y_shift) for p in path[1]]
parsed_groups[e.getAttribute('id')] = paths
out = []
for g in parsed_groups:
for path in parsed_groups[g]:
out.append('<area href="#" title="%s" shape="poly" coords="%s"></area>' %
(path[0], ', '.join([("%d,%d" % (p[0]*width_ratio, p[1]*height_ratio)) for p in path[1]])))
outfile = open(sys.argv[1].replace('.svg', '.html'), 'w')
outfile.write('\n'.join(out))
| bsd-3-clause |
arnedesmedt/dotfiles | .config/sublime-text-3/Packages.symlinkfollow/pygments/all/pygments/lexers/tcl.py | 47 | 5398 | # -*- coding: utf-8 -*-
"""
pygments.lexers.tcl
~~~~~~~~~~~~~~~~~~~
Lexers for Tcl and related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number
from pygments.util import shebang_matches
__all__ = ['TclLexer']
class TclLexer(RegexLexer):
"""
For Tcl source code.
.. versionadded:: 0.10
"""
keyword_cmds_re = words((
'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif', 'else', 'error',
'eval', 'expr', 'for', 'foreach', 'global', 'if', 'namespace', 'proc', 'rename', 'return',
'set', 'switch', 'then', 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable',
'vwait', 'while'), prefix=r'\b', suffix=r'\b')
builtin_cmds_re = words((
'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close', 'concat', 'dde', 'dict',
'encoding', 'eof', 'exec', 'exit', 'fblocked', 'fconfigure', 'fcopy', 'file',
'fileevent', 'flush', 'format', 'gets', 'glob', 'history', 'http', 'incr', 'info', 'interp',
'join', 'lappend', 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk',
'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort', 'mathfunc',
'mathop', 'memory', 'msgcat', 'open', 'package', 'pid', 'pkg::create', 'pkg_mkIndex',
'platform', 'platform::shell', 'puts', 'pwd', 're_syntax', 'read', 'refchan',
'regexp', 'registry', 'regsub', 'scan', 'seek', 'socket', 'source', 'split', 'string',
'subst', 'tell', 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b')
name = 'Tcl'
aliases = ['tcl']
filenames = ['*.tcl', '*.rvt']
mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']
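    # Helper used below: builds the command-matching rules (keyword commands, builtin
    # commands, then any other word) for a given nesting context; the context suffix
    # selects which 'params*' state is pushed next.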
def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
return [
(keyword_cmds_re, Keyword, 'params' + context),
(builtin_cmds_re, Name.Builtin, 'params' + context),
(r'([\w.-]+)', Name.Variable, 'params' + context),
(r'#', Comment, 'comment'),
]
tokens = {
'root': [
include('command'),
include('basic'),
include('data'),
(r'\}', Keyword), # HACK: somehow we miscounted our braces
],
'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
'command-in-brace': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-brace"),
'command-in-bracket': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-bracket"),
'command-in-paren': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-paren"),
'basic': [
(r'\(', Keyword, 'paren'),
(r'\[', Keyword, 'bracket'),
(r'\{', Keyword, 'brace'),
(r'"', String.Double, 'string'),
(r'(eq|ne|in|ni)\b', Operator.Word),
(r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
],
'data': [
(r'\s+', Text),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'0[0-7]+', Number.Oct),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\$([\w.:-]+)', Name.Variable),
(r'([\w.:-]+)', Text),
],
'params': [
(r';', Keyword, '#pop'),
(r'\n', Text, '#pop'),
(r'(else|elseif|then)\b', Keyword),
include('basic'),
include('data'),
],
'params-in-brace': [
(r'\}', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-paren': [
(r'\)', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-bracket': [
(r'\]', Keyword, ('#pop', '#pop')),
include('params')
],
'string': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
(r'"', String.Double, '#pop')
],
'string-square': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
(r'\]', String.Double, '#pop')
],
'brace': [
(r'\}', Keyword, '#pop'),
include('command-in-brace'),
include('basic'),
include('data'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('command-in-paren'),
include('basic'),
include('data'),
],
'bracket': [
(r'\]', Keyword, '#pop'),
include('command-in-bracket'),
include('basic'),
include('data'),
],
'comment': [
(r'.*[^\\]\n', Comment, '#pop'),
(r'.*\\\n', Comment),
],
}
def analyse_text(text):
return shebang_matches(text, r'(tcl)')
| mit |
medallia/aurora | src/main/python/apache/aurora/config/resource.py | 2 | 3436 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
from numbers import Number
from enum import Enum, unique
from gen.apache.aurora.api.ttypes import Resource
ResourceDetails = namedtuple('ResourceDetails', ['resource_type', 'value'])
@unique
class ResourceType(Enum):
"""Describes Aurora resource types and their traits."""
CPUS = ('numCpus', 'CPU', ' core(s)', float, 1)
RAM_MB = ('ramMb', 'RAM', ' MB', int, 2)
DISK_MB = ('diskMb', 'Disk', ' MB', int, 3)
PORTS = ('namedPort', 'Port', '', str, 4)
GPUS = ('numGpus', 'GPU', ' GPU(s)', int, 5)
def __init__(self, field, display_name, display_unit, value_type, display_position):
self._field = field
self._display_name = display_name
self._display_unit = display_unit
self._value_type = value_type
self._display_position = display_position
@property
def field(self):
return self._field
@property
def display_name(self):
return self._display_name
@property
def display_unit(self):
return self._display_unit
@property
def value_type(self):
return self._value_type
@property
def display_position(self):
return self._display_position
def resource_value(self, resource):
return resource.__dict__.get(self._field)
@classmethod
def from_resource(cls, resource):
for _, member in cls.__members__.items():
if resource.__dict__.get(member.field) is not None:
return member
else:
raise ValueError("Unknown resource: %s" % resource)
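# Illustrative example (hypothetical values): a thrift Resource(numCpus=4.0) maps to
# ResourceType.CPUS, which displays as 'CPU' with unit ' core(s)' and parses as float.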
class ResourceManager(object):
"""Provides helper methods for working with Aurora resources."""
@classmethod
def resource_details(cls, resources):
result = []
if resources:
for resource in list(resources):
r_type = ResourceType.from_resource(resource)
result.append(ResourceDetails(r_type, r_type.resource_value(resource)))
return sorted(result, key=lambda rd: rd.resource_type.display_position)
return result
@classmethod
def resource_details_from_quota(cls, quota):
return cls.resource_details(quota.resources)
@classmethod
def resource_details_from_task(cls, task):
return cls.resource_details(cls._backfill_resources(task))
@classmethod
def quantity_of(cls, resource_details, resource_type):
result = 0.0
for d in resource_details:
if d.resource_type is resource_type:
result += d.value if isinstance(d.value, Number) else 1
return result
@classmethod
def _backfill_resources(cls, r_object):
resources = list(r_object.resources) if r_object.resources else None
if resources is None:
resources = [
Resource(numCpus=r_object.numCpus),
Resource(ramMb=r_object.ramMb),
Resource(diskMb=r_object.diskMb)
]
if hasattr(r_object, 'requestedPorts'):
resources += [Resource(namedPort=p) for p in r_object.requestedPorts or []]
return resources
| apache-2.0 |
Froff/TFY4115-Simulering | python/Simulation.py | 1 | 1185 | from math import sqrt
import Slope
class Simulation:
SIM_STEP_SIZE = 0.0001
const_g = -981
def __init__ (self, slope, **kwargs):
self.slope = slope
self.t = [0]
self.x = [Simulation.SIM_STEP_SIZE]
self.mom_inertia_coefficient = 0
for name, value in kwargs.items():
if name == "startingposition":
self.x = [value]
if name == "momentofintertiacoefficient":
self.mom_inertia_coefficient = value
def runSimulation(self):
while not self.isFinished():
self.step()
def step (self):
x = self.x[-1]
dydx = self.slope.dydx(x)
y = self.slope.f(x) - self.slope.f(0)
I = self.mom_inertia_coefficient
g = Simulation.const_g
step_size = Simulation.SIM_STEP_SIZE
try:
self.x.append(x + step_size * sqrt( (2*g*y) / ( (1 + I) * (1 + dydx**2) ) ))
self.t.append(self.t[-1] + Simulation.SIM_STEP_SIZE)
except ValueError:
print("Math domain error. x={}, y={}".format(x, y))
exit(2)
def isFinished (self):
return self.x[-1] >= self.slope.end
| mit |
Fl0rianFischer/sme_odoo | addons/l10n_pl/__openerp__.py | 19 | 1191 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2009 - now Grzegorz Grzelak [email protected]
{
'name' : 'Poland - Accounting',
'version' : '1.02',
'author' : 'Grzegorz Grzelak (OpenGLOBE)',
'website': 'http://www.openglobe.pl',
'category' : 'Localization/Account Charts',
'description': """
This is the module to manage the accounting chart and taxes for Poland in OpenERP.
==================================================================================
To jest moduł do tworzenia wzorcowego planu kont, podatków, obszarów podatkowych i
rejestrów podatkowych. Moduł ustawia też konta do kupna i sprzedaży towarów
zakładając, że wszystkie towary są w obrocie hurtowym.
Niniejszy moduł jest przeznaczony dla odoo 8.0.
Wewnętrzny numer wersji OpenGLOBE 1.02
""",
'depends' : ['account', 'base_iban', 'base_vat'],
'demo' : [],
'data' : [
'account_chart.xml',
'account_tax.xml',
'fiscal_position.xml',
'country_pl.xml',
'account_chart_template.yml'
],
'installable': True,
}
| gpl-3.0 |
kampanita/pelisalacarta | python/main-classic/channels/pelisdanko.py | 3 | 14488 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para PelisDanko
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
import sys
from core import config
from core import logger
from core import scrapertools
from core.item import Item
__modo_grafico__ = config.get_setting('modo_grafico', 'pelisdanko')
DEBUG = config.get_setting("debug")
host = "http://pelisdanko.com"
art = "http://pelisdanko.com/img/background.jpg"
def mainlist(item):
logger.info("pelisalacarta.channels.pelisdanko mainlist")
itemlist = []
itemlist.append(item.clone(action="novedades", title="Novedades", url=host + "/novedades",
fanart=art))
itemlist.append(item.clone(action="novedades", title="Estrenos", url=host + "/estrenos",
fanart=art))
itemlist.append(item.clone(action="novedades", title="Populares", url=host + "/populares",
fanart=art))
itemlist.append(item.clone(action="actualizadas", title="Películas actualizadas", url=host,
fanart=art))
itemlist.append(item.clone(action="indices", title="Índices", fanart=art))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(action="search", title="Buscar...", fanart=art))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", fanart=art,
text_color="gold", folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
platformtools.show_channel_settings()
if config.is_xbmc():
import xbmc
xbmc.executebuiltin("Container.Refresh")
def search(item, texto):
logger.info("pelisalacarta.channels.pelisdanko search")
texto = texto.replace(" ", "+")
item.url = "http://pelisdanko.com/busqueda?terms=%s" % texto
try:
return novedades(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info("pelisalacarta.channels.pelisdanko newest")
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = "http://pelisdanko.com/novedades"
itemlist = novedades(item)
if itemlist[-1].action == "novedades":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def novedades(item):
logger.info("pelisalacarta.channels.pelisdanko novedades")
itemlist = []
# Descarga la pagina
data = scrapertools.downloadpage(item.url)
bloque = scrapertools.find_multiple_matches(data, '<div class="col-xs-[\d] col-sm-[\d] col-md-[\d] col-lg-[\d]'
' text-center"(.*?)</div>')
for match in bloque:
calidades = scrapertools.find_multiple_matches(match, '<span class="badge badge-critic badge-qualities[^>]+>'
'([^<]+)</span>')
calidad = "[COLOR darkseagreen] "
for quality in calidades:
calidad += "[" + quality + "]"
patron = 'title="([^"]+)".*?href="([^"]+)".*?class="img-responsive img-thumbnail" src="([^"]+)"'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
contentTitle = scrapedtitle[:]
scrapedtitle = "[COLOR darkorange][B]" + scrapedtitle + "[/B][/COLOR]" + calidad + "[/COLOR]"
if (DEBUG): logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="enlaces", title=bbcode_kodi2html(scrapedtitle),
url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
fulltitle=contentTitle, filtro=False, contentTitle=contentTitle,
context="05", trailer=True))
# Busca enlaces de paginas siguientes...
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)" rel="next">')
if len(next_page_url) > 0:
itemlist.append(item.clone(action="novedades", title=">> Página siguiente", url=next_page_url))
return itemlist
def actualizadas(item):
logger.info("pelisalacarta.channels.pelisdanko actualizadas")
itemlist = []
# Descarga la pagina
data = scrapertools.downloadpage(item.url)
bloque_big = scrapertools.find_single_match(data, 'Últimas actualizaciones(.*?)<div class="col-xs-10 col-md-8 '
'text-left">')
bloque = scrapertools.find_multiple_matches(bloque_big, '<div class="col-xs-[\d] col-sm-[\d] col-md-[\d]'
' col-lg-[\d] text-center"(.*?)<br><br>')
for match in bloque:
calidades = scrapertools.find_multiple_matches(match, '<span class="badge badge-critic badge-qualities[^>]+>'
'([^<]+)</span>')
calidad = "[COLOR darkseagreen] "
for quality in calidades:
calidad += "[" + quality + "]"
languages = scrapertools.find_multiple_matches(match, '<img width="28".*?alt="([^"]+)"')
idiomas = " ("
for idioma in languages:
idioma = idioma.replace('ES_', '').replace('ES', 'CAST')
if idioma != "CAST" and idioma != "LAT":
idioma = "VOSE"
idiomas += idioma + "/"
patron = 'title="([^"]+)".*?href="([^"]+)".*?class="img-responsive img-thumbnail" src="([^"]+)"'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
contentTitle = scrapedtitle[:]
scrapedtitle = "[COLOR darkorange][B]" + scrapedtitle + "[/B][/COLOR]" + calidad + idiomas[
:-1] + ")[/COLOR]"
if (DEBUG): logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(item.clone(action="enlaces", title=bbcode_kodi2html(scrapedtitle),
url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
fulltitle=contentTitle, filtro=False, contentTitle=contentTitle,
context="05"))
return itemlist
def indices(item):
logger.info("pelisalacarta.channels.pelisdanko indices")
itemlist = []
item.text_color = "orchid"
itemlist.append(item.clone(action="indice_list", title="Género", url=host, fulltitle="genero"))
itemlist.append(item.clone(action="indice_list", title="Alfabético", url=host, fulltitle="letra"))
itemlist.append(item.clone(action="indice_list", title="Idioma", url=host, fulltitle="idioma"))
itemlist.append(item.clone(action="indice_list", title="Calidad", url=host, fulltitle="calidad"))
itemlist.append(item.clone(action="indice_list", title="Nacionalidad", url=host, fulltitle="nacionalidad"))
return itemlist
def indice_list(item):
logger.info("pelisalacarta.channels.pelisdanko indice_list")
itemlist = []
# Descarga la pagina
data = scrapertools.downloadpage(item.url)
patron = '<a href="(http://pelisdanko.com/%s/[^"]+)">([^<]+)</a>' % item.fulltitle
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.capitalize()
itemlist.append(item.clone(action="novedades", title=scrapedtitle, url=scrapedurl))
return itemlist
def enlaces(item):
logger.info("pelisalacarta.channels.pelisdanko enlaces")
item.extra = ""
item.text_color = ""
itemlist = []
# Descarga la pagina
data = scrapertools.downloadpage(item.url)
data = re.sub(r"\n|\r|\t|\s{2}", '', data)
item.fanart = scrapertools.find_single_match(data, "CUSTOM BACKGROUND.*?url\('([^']+)'")
item.infoLabels["plot"] = scrapertools.find_single_match(data, 'dt>Sinopsis</dt> <dd class=[^>]+>(.*?)</dd>')
year = scrapertools.find_single_match(data, '<dt>Estreno</dt> <dd>(\d+)</dd>')
try:
from core import tmdb
item.infoLabels['year'] = int(year)
# Obtenemos los datos basicos de todas las peliculas mediante multihilos
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
filtro_idioma = config.get_setting("filterlanguages", item.channel)
filtro_enlaces = config.get_setting("filterlinks", item.channel)
dict_idiomas = {'CAST': 2, 'LAT': 1, 'VOSE': 0}
if filtro_enlaces != 0:
itemlist.append(item.clone(action="", title="Enlaces Online", text_color="dodgerblue", text_bold=True))
itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, "ss", item)
if filtro_enlaces != 1:
itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color="dodgerblue", text_bold=True))
itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, "dd", item)
trailer_id = scrapertools.find_single_match(data, 'data:\s*\{\s*id:\s*"([^"]+)"')
data_trailer = scrapertools.downloadpage("http://pelisdanko.com/trailer", post="id=%s" % trailer_id)
url_trailer = scrapertools.find_single_match(data_trailer, 'src="([^"]+)"')
if url_trailer != "":
url_trailer = url_trailer.replace("embed/", "watch?v=")
item.infoLabels['trailer'] = url_trailer
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta"))
return itemlist
def bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, type, item):
logger.info("pelisalacarta.channels.pelisdanko bloque_enlaces")
bloque = scrapertools.find_single_match(data, '<div role="tabpanel" class="tab-pane fade" id="tab-' +
type + '">(.*?)</table>')
patron = '<tr class="rip hover".*?data-slug="([^"]+)".*?src="http://pelisdanko.com/img/flags/(.*?).png"' \
'.*?<span class="label label-default quality[^>]+>([^<]+)</span>.*?<td class="small">([^<]+)</td>'
matches = scrapertools.find_multiple_matches(bloque, patron)
filtrados = []
for slug, flag, quality, date in matches:
if flag != "ES" and flag != "ES_LAT":
flag = "VOSE"
flag = flag.replace('ES_LAT', 'LAT').replace('ES', 'CAST')
scrapedurl = "%s/%s/%s?#%s" % (item.url, slug, type, type)
scrapedtitle = " [COLOR firebrick]Mostrar enlaces: [/COLOR][COLOR goldenrod][" \
+ flag + "/" + quality + "][/COLOR][COLOR khaki] " + date + "[/COLOR]"
if filtro_idioma == 3 or item.filtro:
itemlist.append(item.clone(title=bbcode_kodi2html(scrapedtitle), action="findvideos",
url=scrapedurl, id_enlaces=slug, calidad=quality))
else:
idioma = dict_idiomas[flag]
if idioma == filtro_idioma:
itemlist.append(item.clone(title=bbcode_kodi2html(scrapedtitle),
action="findvideos", url=scrapedurl, id_enlaces=slug))
else:
if flag not in filtrados:
filtrados.append(flag)
if filtro_idioma != 3:
if len(filtrados) > 0:
title = bbcode_kodi2html("[COLOR orangered] Mostrar enlaces filtrados en %s[/COLOR]") % ", ".join(
filtrados)
itemlist.append(item.clone(title=title, action="enlaces", url=item.url, filtro=True))
return itemlist
def findvideos(item):
logger.info("pelisalacarta.channels.pelisdanko findvideos")
itemlist = []
if item.url[-2:] == "ss":
prefix = "strms"
else:
prefix = "lnks"
    # Download the page
data = scrapertools.downloadpage(item.url)
    # Parameters for the redirect that exposes the links
data_slug = scrapertools.find_single_match(data, '<div id="ad" data-id="[^"]+" data-slug="([^"]+)"')
data_id = scrapertools.find_single_match(data, '<tr class="rip hover" data-id="([^"]+)"')
url = "http://pelisdanko.com/%s/%s/%s/%s" % (prefix, data_id, item.id_enlaces, data_slug)
data = scrapertools.downloadpage(url, post="")
from core import servertools
video_item_list = servertools.find_video_items(data=data)
for video_item in video_item_list:
title = "[COLOR green]%s[/COLOR] | [COLOR darkorange][%s][/COLOR]" % (video_item.server, item.calidad)
itemlist.append(item.clone(title=bbcode_kodi2html(title), url=video_item.url, action="play",
server=video_item.server, text_color=""))
    # Option "Add this movie to the XBMC library"
if config.get_library_support() and len(itemlist) > 0 and item.category != "Cine":
itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url,
infoLabels={'title': item.fulltitle}, action="add_pelicula_to_library",
fulltitle=item.fulltitle, text_color="green", id_enlaces=item.id_enlaces))
return itemlist
def bbcode_kodi2html(text):
if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"):
import re
text = re.sub(r'\[COLOR\s([^\]]+)\]',
r'<span style="color: \1">',
text)
text = text.replace('[/COLOR]', '</span>') \
.replace('[CR]', '<br>') \
.replace('[B]', '<strong>') \
.replace('[/B]', '</strong>') \
.replace('"color: white"', '"color: auto"')
return text
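# A quick illustration of the conversion above (illustrative input/output only):
#   bbcode_kodi2html('[COLOR green]Enlace[/COLOR] [B]HD[/B]')
#   # on Plex/mediaserver builds -> '<span style="color: green">Enlace</span> <strong>HD</strong>'
#   # on Kodi the text is returned unchanged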
| gpl-3.0 |
elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 1A/instances/10_0_workflow_full_10files_primary_3sh_3rs_noannot_with_proj_3s_range/generalinfo_0/GeneralInfo_0.py | 50 | 1207 | #!/usr/bin/env python
"""
This activity wants to answer:
- which time interval was analysed?
- how many items has this interval?
"""
# Connection with SciWonc-Dataflow module
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import ConfigDB_GeneralInfo_0
# connector and config
client = DataStoreClient("mongodb", ConfigDB_GeneralInfo_0)
# according to config
data = client.getData() # return an array of docs (like a csv reader)
output = []
count = 0
min_time = None
max_time = None
if(data):
# processing
while True:
doc = data.next()
if doc is None:
break;
current_time = float(doc['time'])
if current_time:
if min_time is None or min_time > current_time:
min_time = current_time
if max_time is None or max_time < current_time:
max_time = current_time
count += 1
if count > 0:
newline = {}
newline['interval seconds'] = (max_time - min_time)/1000000
newline['total items'] = count
newline['min timestamp'] = min_time
newline['max timestamp'] = max_time
output.append(newline)
client.saveData(output)
| gpl-3.0 |
mdanielwork/intellij-community | python/helpers/pydev/tests_pydevd_runfiles/test_pydevdio.py | 26 | 1184 | import sys
import os
import unittest
class Test(unittest.TestCase):
def test_it(self):
#make it as if we were executing from the directory above this one (so that we can use jycompletionserver
#without the need for it being in the pythonpath)
#(twice the dirname to get the previous level from this file.)
import test_pydevdio #@UnresolvedImport - importing itself
ADD_TO_PYTHONPATH = os.path.join(os.path.dirname(os.path.dirname(test_pydevdio.__file__)))
sys.path.insert(0, ADD_TO_PYTHONPATH)
try:
from _pydevd_bundle import pydevd_io
original = sys.stdout
try:
sys.stdout = pydevd_io.IOBuf()
print('foo')
print('bar')
self.assertEqual('foo\nbar\n', sys.stdout.getvalue()) #@UndefinedVariable
print('ww')
print('xx')
self.assertEqual('ww\nxx\n', sys.stdout.getvalue()) #@UndefinedVariable
finally:
sys.stdout = original
finally:
#remove it to leave it ok for other tests
sys.path.remove(ADD_TO_PYTHONPATH)
| apache-2.0 |
tchernomax/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_boot_manager.py | 11 | 12392 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_guest_boot_manager
short_description: Manage boot options for the given virtual machine
description:
- This module can be used to manage boot options for the given virtual machine.
version_added: 2.7
author:
- Abhijeet Kasurde (@Akasurde) <[email protected]>
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the VM to work with.
- This is required if C(uuid) parameter is not supplied.
uuid:
description:
- UUID of the instance to manage if known, this is VMware's BIOS UUID.
- This is required if C(name) parameter is not supplied.
boot_order:
description:
- List of the boot devices.
default: []
name_match:
description:
- If multiple virtual machines matching the name, use the first or last found.
default: 'first'
choices: ['first', 'last']
boot_delay:
description:
- Delay in milliseconds before starting the boot sequence.
default: 0
enter_bios_setup:
description:
- If set to C(True), the virtual machine automatically enters BIOS setup the next time it boots.
    - The virtual machine resets this flag, so that the next boot proceeds normally.
type: 'bool'
default: False
boot_retry_enabled:
description:
    - If set to C(True), a virtual machine that fails to boot will try to boot again after C(boot_retry_delay) has expired.
- If set to C(False), the virtual machine waits indefinitely for user intervention.
type: 'bool'
default: False
boot_retry_delay:
description:
    - Specify the time in milliseconds between a virtual machine boot failure and the subsequent attempt to boot again.
- If set, will automatically set C(boot_retry_enabled) to C(True) as this parameter is required.
default: 0
boot_firmware:
description:
- Choose which firmware should be used to boot the virtual machine.
choices: ["bios", "efi"]
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Change virtual machine's boot order and related parameters
vmware_guest_boot_manager:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
name: testvm
boot_delay: 2000
enter_bios_setup: True
boot_retry_enabled: True
boot_retry_delay: 22300
boot_firmware: bios
boot_order:
- floppy
- cdrom
- ethernet
- disk
delegate_to: localhost
register: vm_boot_order
'''
RETURN = r"""
vm_boot_status:
description: metadata about boot order of virtual machine
returned: always
type: dict
sample: {
"current_boot_order": [
"floppy",
"disk",
"ethernet",
"cdrom"
],
"current_boot_delay": 2000,
"current_boot_retry_delay": 22300,
"current_boot_retry_enabled": true,
"current_enter_bios_setup": true,
"current_boot_firmware": "bios",
"previous_boot_delay": 10,
"previous_boot_retry_delay": 10000,
"previous_boot_retry_enabled": true,
"previous_enter_bios_setup": false,
"previous_boot_firmware": "bios",
"previous_boot_order": [
"ethernet",
"cdrom",
"floppy",
"disk"
],
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id, wait_for_task, TaskError
try:
from pyVmomi import vim
except ImportError:
pass
class VmBootManager(PyVmomi):
def __init__(self, module):
super(VmBootManager, self).__init__(module)
self.name = self.params['name']
self.uuid = self.params['uuid']
self.vm = None
def _get_vm(self):
vms = []
if self.uuid:
vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid")
if vm_obj is None:
self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid)
vms = [vm_obj]
elif self.name:
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
for temp_vm_object in objects:
if temp_vm_object.obj.name == self.name:
vms.append(temp_vm_object.obj)
if vms:
if self.params.get('name_match') == 'first':
self.vm = vms[0]
elif self.params.get('name_match') == 'last':
self.vm = vms[-1]
else:
self.module.fail_json(msg="Failed to find virtual machine using %s" % (self.name or self.uuid))
@staticmethod
def humanize_boot_order(boot_order):
results = []
for device in boot_order:
if isinstance(device, vim.vm.BootOptions.BootableCdromDevice):
results.append('cdrom')
elif isinstance(device, vim.vm.BootOptions.BootableDiskDevice):
results.append('disk')
elif isinstance(device, vim.vm.BootOptions.BootableEthernetDevice):
results.append('ethernet')
elif isinstance(device, vim.vm.BootOptions.BootableFloppyDevice):
results.append('floppy')
return results
def ensure(self):
self._get_vm()
valid_device_strings = ['cdrom', 'disk', 'ethernet', 'floppy']
boot_order_list = []
for device_order in self.params.get('boot_order'):
if device_order not in valid_device_strings:
self.module.fail_json(msg="Invalid device found [%s], please specify device from ['%s']" % (device_order,
"', '".join(valid_device_strings)))
if device_order == 'cdrom':
first_cdrom = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualCdrom)]
if first_cdrom:
boot_order_list.append(vim.vm.BootOptions.BootableCdromDevice())
elif device_order == 'disk':
first_hdd = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)]
if first_hdd:
boot_order_list.append(vim.vm.BootOptions.BootableDiskDevice(deviceKey=first_hdd[0].key))
elif device_order == 'ethernet':
first_ether = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualEthernetCard)]
if first_ether:
boot_order_list.append(vim.vm.BootOptions.BootableEthernetDevice(deviceKey=first_ether[0].key))
elif device_order == 'floppy':
first_floppy = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualFloppy)]
if first_floppy:
boot_order_list.append(vim.vm.BootOptions.BootableFloppyDevice())
change_needed = False
kwargs = dict()
if len(boot_order_list) != len(self.vm.config.bootOptions.bootOrder):
kwargs.update({'bootOrder': boot_order_list})
change_needed = True
else:
for i in range(0, len(boot_order_list)):
boot_device_type = type(boot_order_list[i])
vm_boot_device_type = type(self.vm.config.bootOptions.bootOrder[i])
if boot_device_type != vm_boot_device_type:
kwargs.update({'bootOrder': boot_order_list})
change_needed = True
if self.vm.config.bootOptions.bootDelay != self.params.get('boot_delay'):
kwargs.update({'bootDelay': self.params.get('boot_delay')})
change_needed = True
if self.vm.config.bootOptions.enterBIOSSetup != self.params.get('enter_bios_setup'):
kwargs.update({'enterBIOSSetup': self.params.get('enter_bios_setup')})
change_needed = True
if self.vm.config.bootOptions.bootRetryEnabled != self.params.get('boot_retry_enabled'):
kwargs.update({'bootRetryEnabled': self.params.get('boot_retry_enabled')})
change_needed = True
if self.vm.config.bootOptions.bootRetryDelay != self.params.get('boot_retry_delay'):
if not self.vm.config.bootOptions.bootRetryEnabled:
kwargs.update({'bootRetryEnabled': True})
kwargs.update({'bootRetryDelay': self.params.get('boot_retry_delay')})
change_needed = True
boot_firmware_required = False
if self.vm.config.firmware != self.params.get('boot_firmware'):
change_needed = True
boot_firmware_required = True
changed = False
results = dict(
previous_boot_order=self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
previous_boot_delay=self.vm.config.bootOptions.bootDelay,
previous_enter_bios_setup=self.vm.config.bootOptions.enterBIOSSetup,
previous_boot_retry_enabled=self.vm.config.bootOptions.bootRetryEnabled,
previous_boot_retry_delay=self.vm.config.bootOptions.bootRetryDelay,
previous_boot_firmware=self.vm.config.firmware,
current_boot_order=[],
)
if change_needed:
vm_conf = vim.vm.ConfigSpec()
vm_conf.bootOptions = vim.vm.BootOptions(**kwargs)
if boot_firmware_required:
vm_conf.firmware = self.params.get('boot_firmware')
task = self.vm.ReconfigVM_Task(vm_conf)
try:
changed, result = wait_for_task(task)
except TaskError as e:
                self.module.fail_json(msg="Failed to reconfigure the boot order of"
                                          " virtual machine %s due to: %s" % (self.name or self.uuid,
                                                                              to_native(e)))
results.update(
{
'current_boot_order': self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
'current_boot_delay': self.vm.config.bootOptions.bootDelay,
'current_enter_bios_setup': self.vm.config.bootOptions.enterBIOSSetup,
'current_boot_retry_enabled': self.vm.config.bootOptions.bootRetryEnabled,
'current_boot_retry_delay': self.vm.config.bootOptions.bootRetryDelay,
'current_boot_firmware': self.vm.config.firmware,
}
)
self.module.exit_json(changed=changed, vm_boot_status=results)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
name=dict(type='str'),
uuid=dict(type='str'),
boot_order=dict(
type='list',
default=[],
),
name_match=dict(
choices=['first', 'last'],
default='first'
),
boot_delay=dict(
type='int',
default=0,
),
enter_bios_setup=dict(
type='bool',
default=False,
),
boot_retry_enabled=dict(
type='bool',
default=False,
),
boot_retry_delay=dict(
type='int',
default=0,
),
boot_firmware=dict(
type='str',
choices=['efi', 'bios'],
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['name', 'uuid']
],
mutually_exclusive=[
['name', 'uuid']
],
)
pyv = VmBootManager(module)
pyv.ensure()
if __name__ == '__main__':
main()
| gpl-3.0 |
googleapis/googleapis-gen | google/cloud/gkehub/v1alpha2/gkehub-v1alpha2-py/google/cloud/gkehub_v1alpha2/services/gke_hub/pagers.py | 1 | 5811 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.gkehub_v1alpha2.types import membership
class ListMembershipsPager:
"""A pager for iterating through ``list_memberships`` requests.
This class thinly wraps an initial
:class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse` object, and
provides an ``__iter__`` method to iterate through its
``resources`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListMemberships`` requests and continue to iterate
through the ``resources`` field on the
corresponding responses.
All the usual :class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., membership.ListMembershipsResponse],
request: membership.ListMembershipsRequest,
response: membership.ListMembershipsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.gkehub_v1alpha2.types.ListMembershipsRequest):
The initial request object.
response (google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = membership.ListMembershipsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[membership.ListMembershipsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[membership.Membership]:
for page in self.pages:
yield from page.resources
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
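# A minimal usage sketch for the synchronous pager above (a sketch, not part of
# the generated library; the client construction and the parent value are
# assumptions for illustration):
#
#   from google.cloud import gkehub_v1alpha2
#
#   client = gkehub_v1alpha2.GkeHubClient()
#   pager = client.list_memberships(parent="projects/my-project/locations/global")
#   for membership in pager:        # __iter__ yields Membership resources
#       print(membership.name)
#   for page in pager.pages:        # or walk the raw responses page by page
#       print(len(page.resources))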
class ListMembershipsAsyncPager:
"""A pager for iterating through ``list_memberships`` requests.
This class thinly wraps an initial
:class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``resources`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListMemberships`` requests and continue to iterate
through the ``resources`` field on the
corresponding responses.
All the usual :class:`google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[membership.ListMembershipsResponse]],
request: membership.ListMembershipsRequest,
response: membership.ListMembershipsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.gkehub_v1alpha2.types.ListMembershipsRequest):
The initial request object.
response (google.cloud.gkehub_v1alpha2.types.ListMembershipsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = membership.ListMembershipsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[membership.ListMembershipsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[membership.Membership]:
async def async_generator():
async for page in self.pages:
for response in page.resources:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| apache-2.0 |
wuhengzhi/chromium-crosswalk | tools/json_schema_compiler/js_externs_generator_test.py | 15 | 8773 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import idl_schema
import json_parse
from js_externs_generator import JsExternsGenerator
from datetime import datetime
import model
import sys
import unittest
# The contents of a fake idl file.
fake_idl = """
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// A totally fake API.
namespace fakeApi {
enum Greek {
ALPHA,
BETA,
GAMMA,
DELTA
};
dictionary Bar {
long num;
};
dictionary Baz {
DOMString str;
long num;
boolean b;
Greek letter;
Greek? optionalLetter;
long[] arr;
Bar[]? optionalObjArr;
Greek[] enumArr;
any[] anythingGoes;
Bar obj;
long? maybe;
(DOMString or Greek or long[]) choice;
object plainObj;
ArrayBuffer arrayBuff;
};
callback VoidCallback = void();
callback BazGreekCallback = void(Baz baz, Greek greek);
interface Functions {
// Does something exciting! And what's more, this is a multiline function
// comment! It goes onto multiple lines!
// |baz| : The baz to use.
static void doSomething(Baz baz, VoidCallback callback);
// |callback| : The callback which will most assuredly in all cases be
// called; that is, of course, iff such a callback was provided and is
// not at all null.
static void bazGreek(optional BazGreekCallback callback);
[deprecated="Use a new method."] static DOMString returnString();
};
interface Events {
// Fired when we realize it's a trap!
static void onTrapDetected(Baz baz);
};
};
"""
# The output we expect from our fake idl file.
expected_output = ("""// Copyright %s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file was generated by:
// %s.
// NOTE: The format of types has changed. 'FooType' is now
// 'chrome.fakeApi.FooType'.
// Please run the closure compiler before committing changes.
// See https://chromium.googlesource.com/chromium/src/+/master/docs/closure_compilation.md
/** @fileoverview Externs generated from namespace: fakeApi */
/**
* @const
*/
chrome.fakeApi = {};
/**
* @enum {string}
* @see https://developer.chrome.com/extensions/fakeApi#type-Greek
*/
chrome.fakeApi.Greek = {
ALPHA: 'ALPHA',
BETA: 'BETA',
GAMMA: 'GAMMA',
DELTA: 'DELTA',
};
/**
* @typedef {{
* num: number
* }}
* @see https://developer.chrome.com/extensions/fakeApi#type-Bar
*/
chrome.fakeApi.Bar;
/**
* @typedef {{
* str: string,
* num: number,
* b: boolean,
* letter: !chrome.fakeApi.Greek,
* optionalLetter: (!chrome.fakeApi.Greek|undefined),
* arr: !Array<number>,
* optionalObjArr: (!Array<!chrome.fakeApi.Bar>|undefined),
* enumArr: !Array<!chrome.fakeApi.Greek>,
* anythingGoes: !Array<*>,
* obj: !chrome.fakeApi.Bar,
* maybe: (number|undefined),
* choice: (string|!chrome.fakeApi.Greek|!Array<number>),
* plainObj: Object,
* arrayBuff: ArrayBuffer
* }}
* @see https://developer.chrome.com/extensions/fakeApi#type-Baz
*/
chrome.fakeApi.Baz;
/**
* Does something exciting! And what's more, this is a multiline function
* comment! It goes onto multiple lines!
* @param {!chrome.fakeApi.Baz} baz The baz to use.
* @param {function():void} callback
* @see https://developer.chrome.com/extensions/fakeApi#method-doSomething
*/
chrome.fakeApi.doSomething = function(baz, callback) {};
/**
* @param {function(!chrome.fakeApi.Baz, !chrome.fakeApi.Greek):void=} callback
* The callback which will most assuredly in all cases be called; that is,
* of course, iff such a callback was provided and is not at all null.
* @see https://developer.chrome.com/extensions/fakeApi#method-bazGreek
*/
chrome.fakeApi.bazGreek = function(callback) {};
/**
* @return {string}
* @deprecated Use a new method.
* @see https://developer.chrome.com/extensions/fakeApi#method-returnString
*/
chrome.fakeApi.returnString = function() {};
/**
* Fired when we realize it's a trap!
* @type {!ChromeEvent}
* @see https://developer.chrome.com/extensions/fakeApi#event-onTrapDetected
*/
chrome.fakeApi.onTrapDetected;""" % (datetime.now().year, sys.argv[0]))
fake_json = """// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
[
{
"namespace": "fakeJson",
"description": "Fake JSON API Stuff",
"types": [ {
"id": "CrazyEnum",
"type": "string",
"enum": ["camelCaseEnum", "Non-Characters", "5NumFirst", \
"3Just-plainOld_MEAN"]
} ],
"functions": [ {
"name": "funcWithInlineObj",
"type": "function",
"parameters": [
{
"type": "object",
"name": "inlineObj",
"description": "Evil inline object! With a super duper duper long\
string description that causes problems!",
"properties": {
"foo": {
"type": "boolean",
"optional": "true",
"description": "The foo."
},
"bar": {
"type": "integer",
"description": "The bar."
},
"baz": {
"type": "object",
"description": "Inception object.",
"properties": {
"depth": {
"type": "integer"
}
}
},
"quu": {
"type": "binary",
"description": "The array buffer"
}
}
},
{
"name": "callback",
"type": "function",
"parameters": [
{
"type": "object",
"name": "returnObj",
"properties": {
"str": { "type": "string"}
}
}
],
"description": "The callback to this heinous method"
}
],
"returns": {
"type": "object",
"properties": {
"str": { "type": "string" },
"int": { "type": "number" }
}
}
} ]
}
]"""
json_expected = ("""// Copyright %s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file was generated by:
// %s.
// NOTE: The format of types has changed. 'FooType' is now
// 'chrome.fakeJson.FooType'.
// Please run the closure compiler before committing changes.
// See https://chromium.googlesource.com/chromium/src/+/master/docs/closure_compilation.md
/** @fileoverview Externs generated from namespace: fakeJson */
/**
* @const
*/
chrome.fakeJson = {};
/**
* @enum {string}
* @see https://developer.chrome.com/extensions/fakeJson#type-CrazyEnum
*/
chrome.fakeJson.CrazyEnum = {
CAMEL_CASE_ENUM: 'camelCaseEnum',
NON_CHARACTERS: 'Non-Characters',
_5NUM_FIRST: '5NumFirst',
_3JUST_PLAIN_OLD_MEAN: '3Just-plainOld_MEAN',
};
/**
* @param {{
* foo: (boolean|undefined),
* bar: number,
* baz: {
* depth: number
* },
* quu: ArrayBuffer
* }} inlineObj Evil inline object! With a super duper duper long string
* description that causes problems!
* @param {function({
* str: string
* }):void} callback The callback to this heinous method
* @return {{
* str: string,
* int: number
* }}
* @see https://developer.chrome.com/extensions/fakeJson#method-funcWithInlineObj
*/
chrome.fakeJson.funcWithInlineObj = function(inlineObj, callback) {};""" %
(datetime.now().year, sys.argv[0]))
class JsExternGeneratorTest(unittest.TestCase):
def _GetNamespace(self, fake_content, filename, is_idl):
"""Returns a namespace object for the given content"""
api_def = (idl_schema.Process(fake_content, filename) if is_idl
else json_parse.Parse(fake_content))
m = model.Model()
return m.AddNamespace(api_def[0], filename)
def setUp(self):
    self.maxDiff = None # Lets us see the full diff when unequal.
def testBasic(self):
namespace = self._GetNamespace(fake_idl, 'fake_api.idl', True)
self.assertMultiLineEqual(expected_output,
JsExternsGenerator().Generate(namespace).Render())
def testJsonWithInlineObjects(self):
namespace = self._GetNamespace(fake_json, 'fake_api.json', False)
self.assertMultiLineEqual(json_expected,
JsExternsGenerator().Generate(namespace).Render())
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
erccarls/vectorsearch | vectorsearch/word2vec.py | 1 | 4242 | from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools
import gensim
from gensim.utils import keep_vocab_item
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType
logger = logging.getLogger(__name__)
try:
from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow
from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
MAX_WORDS_IN_BATCH = 10000
class Word2Vec(gensim.models.Word2Vec):
def __init__(self, *args, **kwargs):
        # Name the class explicitly: super(self.__class__, ...) recurses forever in subclasses.
        super(Word2Vec, self).__init__(*args, **kwargs)
self._stem_memory = defaultdict(set)
def most_similar(self, words={}, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words.
words : a dict where the words are the keys and the weights are the values.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
If topn is False, most_similar returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
          >>> trained_model.most_similar(words={'woman': 1.0, 'king': 1.0, 'man': -1.0})
[('queen', 0.50882536), ...]
"""
self.init_sims()
# if isinstance(positive, string_types) and not negative:
# # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
# positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
# positive = [
# (word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
# for word in positive
# ]
# negative = [
# (word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
# for word in negative
# ]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in words.items():
if isinstance(word, ndarray):
mean.append(weight * word)
elif word in self.vocab:
mean.append(weight * self.syn0norm[self.vocab[word].index])
all_words.add(self.vocab[word].index)
else:
                # Warning(...) only built an exception object; actually log the problem.
                logger.warning("word '%s' not in vocabulary", word)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
limited = self.syn0norm if restrict_vocab is None else self.syn0norm[:restrict_vocab]
dists = dot(limited, mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
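# Hedged usage sketch for the weighted most_similar() above (the corpus and the
# loading call are assumptions for illustration; any gensim-compatible model works):
#
#   model = Word2Vec(sentences)                     # or Word2Vec.load(path)
#   # the weights take the place of gensim's positive/negative word lists
#   hits = model.most_similar(words={'king': 1.0, 'woman': 1.0, 'man': -1.0}, topn=5)
#   for word, score in hits:
#       print(word, score)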
| apache-2.0 |
CloudBreadPaPa/azure-ml-python-seminar | code/python/ml-Iris.py | 1 | 1412 | import urllib2
# If you are using Python 3+, import urllib instead of urllib2
import json
data = {
    "Inputs": {
        "input1": {
            "ColumnNames": ["Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species"],
            "Values": [["1", "1", "1", "1", ""]],
        },
    },
    "GlobalParameters": {},
}
body = str.encode(json.dumps(data))
url = 'https://asiasoutheast.services.azureml.net/workspaces/46d0e60b05b34558827abd41f11d204f/services/acac88a083ce443789028306375ddf56/execute?api-version=2.0&details=true'
api_key = '<change here>' # Replace this with the API key for the web service
headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
req = urllib2.Request(url, body, headers)
try:
response = urllib2.urlopen(req)
# If you are using Python 3+, replace urllib2 with urllib.request in the above code:
# req = urllib.request.Request(url, body, headers)
# response = urllib.request.urlopen(req)
result = response.read()
print(result)
except urllib2.HTTPError, error:
print("The request failed with status code: " + str(error.code))
    # Print the headers - they include the request ID and the timestamp, which are useful for debugging the failure
print(error.info())
print(json.loads(error.read()))
| mit |
kaiix/depot_tools | tests/trychange_unittest.py | 43 | 6250 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for trychange.py."""
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.super_mox import SuperMoxTestBase
import subprocess2
import trychange
class TryChangeTestsBase(SuperMoxTestBase):
"""Setups and tear downs the mocks but doesn't test anything as-is."""
def setUp(self):
SuperMoxTestBase.setUp(self)
self.mox.StubOutWithMock(subprocess2, 'communicate')
self.mox.StubOutWithMock(trychange, 'RunGit')
self.mox.StubOutWithMock(trychange.scm.GIT, 'Capture')
self.mox.StubOutWithMock(trychange.scm.GIT, 'GenerateDiff')
self.mox.StubOutWithMock(trychange.scm.GIT, 'GetCheckoutRoot')
self.mox.StubOutWithMock(trychange.scm.GIT, 'GetEmail')
self.mox.StubOutWithMock(trychange.scm.GIT, 'GetPatchName')
self.mox.StubOutWithMock(trychange.scm.GIT, 'GetUpstreamBranch')
self.mox.StubOutWithMock(trychange.scm.SVN, 'GenerateDiff')
self.mox.StubOutWithMock(trychange.scm.SVN, 'GetCheckoutRoot')
self.mox.StubOutWithMock(trychange.scm.SVN, 'GetEmail')
self.fake_root = self.Dir()
self.expected_files = ['foo.txt', 'bar.txt']
self.options = trychange.optparse.Values()
self.options.files = self.expected_files
self.options.diff = None
self.options.name = None
self.options.email = None
self.options.exclude = []
class TryChangeUnittest(TryChangeTestsBase):
"""General trychange.py tests."""
def testMembersChanged(self):
members = [
'DieWithError', 'EPILOG', 'Escape', 'GIT', 'GIT_PATCH_DIR_BASENAME',
'GetMungedDiff', 'GuessVCS', 'GIT_BRANCH_FILE',
'HELP_STRING', 'Error', 'InvalidScript', 'NoTryServerAccess',
'OptionParser', 'PrintSuccess',
'RunCommand', 'RunGit', 'SCM', 'SVN', 'TryChange', 'USAGE', 'contextlib',
'breakpad',
'datetime', 'errno', 'fix_encoding', 'gcl', 'gclient_utils',
'gerrit_util', 'gen_parser',
'getpass', 'itertools', 'json', 'logging', 'optparse', 'os', 'posixpath',
're', 'scm', 'shutil', 'subprocess2', 'sys', 'tempfile', 'urllib',
'urllib2', 'urlparse']
# If this test fails, you should add the relevant test.
self.compareMembers(trychange, members)
class TryChangeSimpleTest(unittest.TestCase):
# Doesn't require supermox to run.
def test_flags(self):
cmd = [
'--bot', 'bot1,bot2',
'--testfilter', 'test1',
'--testfilter', 'test2',
'--user', 'joe',
'--email', '[email protected]',
]
options, args = trychange.gen_parser(None).parse_args(cmd)
self.assertEquals([], args)
# pylint: disable=W0212
bot_spec = trychange._ParseBotList(options.bot, options.testfilter)
if options.testfilter:
bot_spec = trychange._ApplyTestFilter(options.testfilter, bot_spec)
values = trychange._ParseSendChangeOptions(bot_spec, options)
self.assertEquals(
[
('user', 'joe'),
('name', None),
('email', '[email protected]'),
('bot', 'bot1:test1,test2'),
('bot', 'bot2:test1,test2'),
],
values)
def test_flags_bad_combination(self):
cmd = [
'--bot', 'bot1:test1',
'--testfilter', 'test2',
]
options, args = trychange.gen_parser(None).parse_args(cmd)
self.assertEquals([], args)
try:
# pylint: disable=W0212
trychange._ParseBotList(options.bot, options.testfilter)
self.fail()
except ValueError:
pass
class SVNUnittest(TryChangeTestsBase):
"""trychange.SVN tests."""
def testMembersChanged(self):
members = [
'AutomagicalSettings', 'CaptureStatus', 'GetCodeReviewSetting',
'ReadRootFile', 'GenerateDiff', 'GetFileNames', 'files', 'file_tuples',
]
# If this test fails, you should add the relevant test.
self.compareMembers(trychange.SVN, members)
def testBasic(self):
# pylint: disable=E1103
trychange.os.path.abspath(self.fake_root).AndReturn(self.fake_root)
trychange.scm.SVN.GetCheckoutRoot(self.fake_root).AndReturn(self.fake_root)
trychange.scm.SVN.GenerateDiff(['foo.txt', 'bar.txt'],
self.fake_root,
full_move=True,
revision=None).AndReturn('A diff')
trychange.scm.SVN.GetEmail(self.fake_root).AndReturn('[email protected]')
self.mox.ReplayAll()
svn = trychange.SVN(self.options, self.fake_root, self.options.files)
self.assertEqual(svn.GetFileNames(), self.expected_files)
self.assertEqual(svn.checkout_root, self.fake_root)
self.assertEqual(svn.GenerateDiff(), 'A diff')
class GITUnittest(TryChangeTestsBase):
"""trychange.GIT tests."""
def testMembersChanged(self):
members = [
'AutomagicalSettings', 'CaptureStatus', 'GetCodeReviewSetting',
'ReadRootFile', 'GenerateDiff', 'GetFileNames', 'files', 'file_tuples',
]
# If this test fails, you should add the relevant test.
self.compareMembers(trychange.GIT, members)
def testBasic(self):
# pylint: disable=E1103
trychange.os.path.abspath(self.fake_root).AndReturn(self.fake_root)
trychange.scm.GIT.GetCheckoutRoot(self.fake_root).AndReturn(self.fake_root)
trychange.scm.GIT.GetUpstreamBranch(self.fake_root).AndReturn('somewhere')
trychange.RunGit(['diff-index', 'HEAD'])
trychange.scm.GIT.GenerateDiff(self.fake_root,
full_move=True,
files=['foo.txt', 'bar.txt'],
branch='somewhere').AndReturn('A diff')
trychange.scm.GIT.GetPatchName(self.fake_root).AndReturn('bleh-1233')
trychange.scm.GIT.GetEmail(self.fake_root).AndReturn('[email protected]')
self.mox.ReplayAll()
git = trychange.GIT(self.options, self.fake_root, self.options.files)
self.assertEqual(git.GetFileNames(), self.expected_files)
self.assertEqual(git.checkout_root, self.fake_root)
self.assertEqual(git.GenerateDiff(), 'A diff')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
yongshengwang/hue | build/env/lib/python2.7/site-packages/Django-1.6.10-py2.7.egg/django/contrib/syndication/views.py | 113 | 8515 | from __future__ import unicode_literals
from calendar import timegm
from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.template import loader, TemplateDoesNotExist, RequestContext
from django.utils import feedgenerator, tzinfo
from django.utils.encoding import force_text, iri_to_uri, smart_text
from django.utils.html import escape
from django.utils.http import http_date
from django.utils import six
from django.utils.timezone import is_naive
def add_domain(domain, url, secure=False):
protocol = 'https' if secure else 'http'
if url.startswith('//'):
# Support network-path reference (see #16753) - RSS requires a protocol
url = '%s:%s' % (protocol, url)
elif not (url.startswith('http://')
or url.startswith('https://')
or url.startswith('mailto:')):
url = iri_to_uri('%s://%s%s' % (protocol, domain, url))
return url
class FeedDoesNotExist(ObjectDoesNotExist):
pass
class Feed(object):
feed_type = feedgenerator.DefaultFeed
title_template = None
description_template = None
def __call__(self, request, *args, **kwargs):
try:
obj = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(content_type=feedgen.mime_type)
if hasattr(self, 'item_pubdate'):
# if item_pubdate is defined for the feed, set header so as
# ConditionalGetMiddleware is able to send 304 NOT MODIFIED
response['Last-Modified'] = http_date(
timegm(feedgen.latest_post_date().utctimetuple()))
feedgen.write(response, 'utf-8')
return response
def item_title(self, item):
# Titles should be double escaped by default (see #6533)
return escape(force_text(item))
def item_description(self, item):
return force_text(item)
def item_link(self, item):
try:
return item.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured('Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class.' % item.__class__.__name__)
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check co_argcount rather than try/excepting the function and
# catching the TypeError, because something inside the function
# may raise the TypeError. This technique is more accurate.
try:
code = six.get_function_code(attr)
except AttributeError:
code = six.get_function_code(attr.__call__)
if code.co_argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
"""
return {}
def get_object(self, request, *args, **kwargs):
return None
def get_context_data(self, **kwargs):
"""
Returns a dictionary to use as extra context if either
``self.description_template`` or ``self.item_template`` are used.
Default implementation preserves the old behavior
of using {'obj': item, 'site': current_site} as the context.
"""
return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
current_site = get_current_site(request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link, request.is_secure())
feed = self.feed_type(
title = self.__get_dynamic_attr('title', obj),
subtitle = self.__get_dynamic_attr('subtitle', obj),
link = link,
description = self.__get_dynamic_attr('description', obj),
language = settings.LANGUAGE_CODE,
feed_url = add_domain(
current_site.domain,
self.__get_dynamic_attr('feed_url', obj) or request.path,
request.is_secure(),
),
author_name = self.__get_dynamic_attr('author_name', obj),
author_link = self.__get_dynamic_attr('author_link', obj),
author_email = self.__get_dynamic_attr('author_email', obj),
categories = self.__get_dynamic_attr('categories', obj),
feed_copyright = self.__get_dynamic_attr('feed_copyright', obj),
feed_guid = self.__get_dynamic_attr('feed_guid', obj),
ttl = self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self.__get_dynamic_attr('items', obj):
context = self.get_context_data(item=item, site=current_site,
obj=obj, request=request)
if title_tmp is not None:
title = title_tmp.render(RequestContext(request, context))
else:
title = self.__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(RequestContext(request, context))
else:
description = self.__get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self.__get_dynamic_attr('item_link', item),
request.is_secure(),
)
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url = smart_text(enc_url),
length = smart_text(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type = smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and is_naive(pubdate):
ltz = tzinfo.LocalTimezone(pubdate)
pubdate = pubdate.replace(tzinfo=ltz)
feed.add_item(
title = title,
link = link,
description = description,
unique_id = self.__get_dynamic_attr('item_guid', item, link),
unique_id_is_permalink = self.__get_dynamic_attr(
'item_guid_is_permalink', item),
enclosure = enc,
pubdate = pubdate,
author_name = author_name,
author_email = author_email,
author_link = author_link,
categories = self.__get_dynamic_attr('item_categories', item),
item_copyright = self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
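# Minimal Feed subclass sketch (illustrative only; the NewsItem model and its
# fields are assumptions, not part of this module):
#
#   class LatestEntriesFeed(Feed):
#       title = "Site news"
#       link = "/news/"
#       description = "Updates on changes and additions to the site."
#
#       def items(self):
#           return NewsItem.objects.order_by('-pub_date')[:5]
#
#       def item_title(self, item):
#           return item.title
#
#       def item_description(self, item):
#           return item.summary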
| apache-2.0 |
andyfaff/scipy | scipy/sparse/csgraph/tests/test_shortest_path.py | 17 | 12026 | import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pytest import raises as assert_raises
from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson,
bellman_ford, construct_dist_matrix,
NegativeCycleError)
import scipy.sparse
import pytest
directed_G = np.array([[0, 3, 3, 0, 0],
[0, 0, 0, 2, 4],
[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[2, 0, 0, 2, 0]], dtype=float)
undirected_G = np.array([[0, 3, 3, 1, 2],
[3, 0, 0, 2, 4],
[3, 0, 0, 0, 0],
[1, 2, 0, 0, 2],
[2, 4, 0, 2, 0]], dtype=float)
unweighted_G = (directed_G > 0).astype(float)
directed_SP = [[0, 3, 3, 5, 7],
[3, 0, 6, 2, 4],
[np.inf, np.inf, 0, np.inf, np.inf],
[1, 4, 4, 0, 8],
[2, 5, 5, 2, 0]]
directed_sparse_zero_G = scipy.sparse.csr_matrix(([0, 1, 2, 3, 1],
([0, 1, 2, 3, 4],
[1, 2, 0, 4, 3])),
shape = (5, 5))
directed_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf],
[3, 0, 1, np.inf, np.inf],
[2, 2, 0, np.inf, np.inf],
[np.inf, np.inf, np.inf, 0, 3],
[np.inf, np.inf, np.inf, 1, 0]]
undirected_sparse_zero_G = scipy.sparse.csr_matrix(([0, 0, 1, 1, 2, 2, 1, 1],
([0, 1, 1, 2, 2, 0, 3, 4],
[1, 0, 2, 1, 0, 2, 4, 3])),
shape = (5, 5))
undirected_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf],
[0, 0, 1, np.inf, np.inf],
[1, 1, 0, np.inf, np.inf],
[np.inf, np.inf, np.inf, 0, 1],
[np.inf, np.inf, np.inf, 1, 0]]
directed_pred = np.array([[-9999, 0, 0, 1, 1],
[3, -9999, 0, 1, 1],
[-9999, -9999, -9999, -9999, -9999],
[3, 0, 0, -9999, 1],
[4, 0, 0, 4, -9999]], dtype=float)
undirected_SP = np.array([[0, 3, 3, 1, 2],
[3, 0, 6, 2, 4],
[3, 6, 0, 4, 5],
[1, 2, 4, 0, 2],
[2, 4, 5, 2, 0]], dtype=float)
undirected_SP_limit_2 = np.array([[0, np.inf, np.inf, 1, 2],
[np.inf, 0, np.inf, 2, np.inf],
[np.inf, np.inf, 0, np.inf, np.inf],
[1, 2, np.inf, 0, 2],
[2, np.inf, np.inf, 2, 0]], dtype=float)
undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5)
undirected_SP_limit_0[undirected_SP_limit_0 > 0] = np.inf
undirected_pred = np.array([[-9999, 0, 0, 0, 0],
[1, -9999, 0, 1, 1],
[2, 0, -9999, 0, 0],
[3, 3, 0, -9999, 3],
[4, 4, 0, 4, -9999]], dtype=float)
methods = ['auto', 'FW', 'D', 'BF', 'J']
def test_dijkstra_limit():
limits = [0, 2, np.inf]
results = [undirected_SP_limit_0,
undirected_SP_limit_2,
undirected_SP]
def check(limit, result):
SP = dijkstra(undirected_G, directed=False, limit=limit)
assert_array_almost_equal(SP, result)
for limit, result in zip(limits, results):
check(limit, result)
def test_directed():
def check(method):
SP = shortest_path(directed_G, method=method, directed=True,
overwrite=False)
assert_array_almost_equal(SP, directed_SP)
for method in methods:
check(method)
def test_undirected():
def check(method, directed_in):
if directed_in:
SP1 = shortest_path(directed_G, method=method, directed=False,
overwrite=False)
assert_array_almost_equal(SP1, undirected_SP)
else:
SP2 = shortest_path(undirected_G, method=method, directed=True,
overwrite=False)
assert_array_almost_equal(SP2, undirected_SP)
for method in methods:
for directed_in in (True, False):
check(method, directed_in)
def test_directed_sparse_zero():
# test directed sparse graph with zero-weight edge and two connected components
def check(method):
SP = shortest_path(directed_sparse_zero_G, method=method, directed=True,
overwrite=False)
assert_array_almost_equal(SP, directed_sparse_zero_SP)
for method in methods:
check(method)
def test_undirected_sparse_zero():
def check(method, directed_in):
if directed_in:
SP1 = shortest_path(directed_sparse_zero_G, method=method, directed=False,
overwrite=False)
assert_array_almost_equal(SP1, undirected_sparse_zero_SP)
else:
SP2 = shortest_path(undirected_sparse_zero_G, method=method, directed=True,
overwrite=False)
assert_array_almost_equal(SP2, undirected_sparse_zero_SP)
for method in methods:
for directed_in in (True, False):
check(method, directed_in)
@pytest.mark.parametrize('directed, SP_ans',
((True, directed_SP),
(False, undirected_SP)))
@pytest.mark.parametrize('indices', ([0, 2, 4], [0, 4], [3, 4], [0, 0]))
def test_dijkstra_indices_min_only(directed, SP_ans, indices):
SP_ans = np.array(SP_ans)
indices = np.array(indices, dtype=np.int64)
min_ind_ans = indices[np.argmin(SP_ans[indices, :], axis=0)]
min_d_ans = np.zeros(SP_ans.shape[0], SP_ans.dtype)
for k in range(SP_ans.shape[0]):
min_d_ans[k] = SP_ans[min_ind_ans[k], k]
min_ind_ans[np.isinf(min_d_ans)] = -9999
SP, pred, sources = dijkstra(directed_G,
directed=directed,
indices=indices,
min_only=True,
return_predecessors=True)
assert_array_almost_equal(SP, min_d_ans)
assert_array_equal(min_ind_ans, sources)
SP = dijkstra(directed_G,
directed=directed,
indices=indices,
min_only=True,
return_predecessors=False)
assert_array_almost_equal(SP, min_d_ans)
@pytest.mark.parametrize('n', (10, 100, 1000))
def test_shortest_path_min_only_random(n):
np.random.seed(1234)
data = scipy.sparse.rand(n, n, density=0.5, format='lil',
random_state=42, dtype=np.float64)
data.setdiag(np.zeros(n, dtype=np.bool_))
# choose some random vertices
v = np.arange(n)
np.random.shuffle(v)
indices = v[:int(n*.1)]
ds, pred, sources = dijkstra(data,
directed=False,
indices=indices,
min_only=True,
return_predecessors=True)
for k in range(n):
p = pred[k]
s = sources[k]
        while p != -9999:
            assert sources[p] == s
p = pred[p]
def test_shortest_path_indices():
indices = np.arange(4)
def check(func, indshape):
outshape = indshape + (5,)
SP = func(directed_G, directed=False,
indices=indices.reshape(indshape))
assert_array_almost_equal(SP, undirected_SP[indices].reshape(outshape))
for indshape in [(4,), (4, 1), (2, 2)]:
for func in (dijkstra, bellman_ford, johnson, shortest_path):
check(func, indshape)
assert_raises(ValueError, shortest_path, directed_G, method='FW',
indices=indices)
def test_predecessors():
SP_res = {True: directed_SP,
False: undirected_SP}
pred_res = {True: directed_pred,
False: undirected_pred}
def check(method, directed):
SP, pred = shortest_path(directed_G, method, directed=directed,
overwrite=False,
return_predecessors=True)
assert_array_almost_equal(SP, SP_res[directed])
assert_array_almost_equal(pred, pred_res[directed])
for method in methods:
for directed in (True, False):
check(method, directed)
def test_construct_shortest_path():
def check(method, directed):
SP1, pred = shortest_path(directed_G,
directed=directed,
overwrite=False,
return_predecessors=True)
SP2 = construct_dist_matrix(directed_G, pred, directed=directed)
assert_array_almost_equal(SP1, SP2)
for method in methods:
for directed in (True, False):
check(method, directed)
def test_unweighted_path():
def check(method, directed):
SP1 = shortest_path(directed_G,
directed=directed,
overwrite=False,
unweighted=True)
SP2 = shortest_path(unweighted_G,
directed=directed,
overwrite=False,
unweighted=False)
assert_array_almost_equal(SP1, SP2)
for method in methods:
for directed in (True, False):
check(method, directed)
def test_negative_cycles():
# create a small graph with a negative cycle
graph = np.ones([5, 5])
graph.flat[::6] = 0
graph[1, 2] = -2
def check(method, directed):
assert_raises(NegativeCycleError, shortest_path, graph, method,
directed)
for method in ['FW', 'J', 'BF']:
for directed in (True, False):
check(method, directed)
def test_masked_input():
    # Use the masked array so that masked (zero) entries are what actually get tested.
    G = np.ma.masked_equal(directed_G, 0)
    def check(method):
        SP = shortest_path(G, method=method, directed=True,
                           overwrite=False)
assert_array_almost_equal(SP, directed_SP)
for method in methods:
check(method)
def test_overwrite():
G = np.array([[0, 3, 3, 1, 2],
[3, 0, 0, 2, 4],
[3, 0, 0, 0, 0],
[1, 2, 0, 0, 2],
[2, 4, 0, 2, 0]], dtype=float)
foo = G.copy()
shortest_path(foo, overwrite=False)
assert_array_equal(foo, G)
@pytest.mark.parametrize('method', methods)
def test_buffer(method):
# Smoke test that sparse matrices with read-only buffers (e.g., those from
# joblib workers) do not cause::
#
# ValueError: buffer source array is read-only
#
G = scipy.sparse.csr_matrix([[1.]])
G.data.flags['WRITEABLE'] = False
shortest_path(G, method=method)
def test_NaN_warnings():
with pytest.warns(None) as record:
shortest_path(np.array([[0, 1], [np.nan, 0]]))
for r in record:
assert r.category is not RuntimeWarning
def test_sparse_matrices():
# Test that using lil,csr and csc sparse matrix do not cause error
G_dense = np.array([[0, 3, 0, 0, 0],
[0, 0, -1, 0, 0],
[0, 0, 0, 2, 0],
[0, 0, 0, 0, 4],
[0, 0, 0, 0, 0]], dtype=float)
SP = shortest_path(G_dense)
G_csr = scipy.sparse.csr_matrix(G_dense)
G_csc = scipy.sparse.csc_matrix(G_dense)
G_lil = scipy.sparse.lil_matrix(G_dense)
assert_array_almost_equal(SP, shortest_path(G_csr))
assert_array_almost_equal(SP, shortest_path(G_csc))
assert_array_almost_equal(SP, shortest_path(G_lil))
| bsd-3-clause |
pieterlexis/pdns | build-scripts/cherry-pick-pr.py | 4 | 1840 | #!/usr/bin/env python3
import requests
import sys
import subprocess
import argparse
def get_commits(pr):
try:
res = requests.get('https://api.github.com/repos/PowerDNS/pdns/pulls/'
'{}/commits'.format(pr)).json()
return [c['sha'] for c in res]
except (ValueError, requests.exceptions.HTTPError) as e:
print(e)
sys.exit(1)
def run_command(cmd):
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
print(e)
sys.exit(1)
a = argparse.ArgumentParser()
action = a.add_mutually_exclusive_group(required=True)
action.add_argument(
'-b', '--backport-unto', metavar='REF', nargs=1, help='Backport, using '
'cherry-pick, all commits from PULL_REQUEST onto REF. This is done on a '
'branch called "backport-PULL_REQUEST". When the cherry-pick fails, solve '
'the conflict as usual and run "git cherry-pick --continue --allow-empty"')
action.add_argument(
'-m', '--merge-into', metavar='REF', nargs=1, help='Take the backport-'
'PULL_REQUEST branch and merge it into REF')
a.add_argument(
'pull_request', metavar='PULL_REQUEST', type=int,
help='The PR number to backport')
args = a.parse_args()
if args.backport_unto:
command = ['git', 'checkout', '-b',
'backport-{}'.format(args.pull_request), args.backport_unto[0]]
run_command(command)
commits = get_commits(args.pull_request)
command = ['git', 'cherry-pick', '-x', '--allow-empty'] + commits
run_command(command)
if args.merge_into:
command = ['git', 'checkout', args.merge_into[0]]
run_command(command)
command = ['git', 'merge', '--no-ff',
'backport-{}'.format(args.pull_request), '-m',
'Backport #{}'.format(args.pull_request)]
run_command(command)
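# Example invocations (the PR number and release branch are hypothetical,
# shown only to illustrate the two modes defined above):
#
#   ./cherry-pick-pr.py --backport-unto rel/auth-4.2.x 7890
#   # ...resolve any conflicts, then: git cherry-pick --continue --allow-empty
#   ./cherry-pick-pr.py --merge-into rel/auth-4.2.x 7890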
| gpl-2.0 |
snasoft/QtCreatorPluginsPack | Bin/3rdParty/vera/bin/lib/Queue.py | 188 | 8561 | """A multi-producer, multi-consumer queue."""
from time import time as _time
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
from collections import deque
import heapq
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue:
"""Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
"""
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = _threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = _threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = _threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = _threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait()
finally:
self.all_tasks_done.release()
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not reliable!)."""
self.mutex.acquire()
n = not self._qsize()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not reliable!)."""
self.mutex.acquire()
n = 0 < self.maxsize == self._qsize()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a positive number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
"""
self.not_full.acquire()
try:
if self.maxsize > 0:
if not block:
if self._qsize() == self.maxsize:
raise Full
elif timeout is None:
while self._qsize() == self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a positive number")
else:
endtime = _time() + timeout
while self._qsize() == self.maxsize:
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a positive number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
"""
self.not_empty.acquire()
try:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a positive number")
else:
endtime = _time() + timeout
while not self._qsize():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
"""
return self.get(False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque()
def _qsize(self, len=len):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
class PriorityQueue(Queue):
'''Variant of Queue that retrieves open entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item, heappush=heapq.heappush):
heappush(self.queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self.queue)
class LifoQueue(Queue):
'''Variant of Queue that retrieves most recently added entries first.'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
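# ---------------------------------------------------------------------------
# Illustrative demo, not part of the upstream module: a minimal sketch of the
# producer/consumer pattern described in the docstrings above.  The worker
# count, queue size and item values are arbitrary choices for the example.
if __name__ == '__main__':
    import threading

    q = Queue(maxsize=4)

    def _worker():
        while True:
            item = q.get()        # blocks until an item is available
            # ... real processing of `item` would go here ...
            q.task_done()         # tell join() this item is finished

    for _ in range(2):
        t = threading.Thread(target=_worker)
        t.daemon = True           # daemon workers let the demo exit after join()
        t.start()

    for i in range(10):
        q.put(i)                  # blocks whenever the queue is full
    q.join()                      # returns once every item has been task_done()

    # PriorityQueue returns the lowest entry first; entries are (priority, data).
    pq = PriorityQueue()
    pq.put((2, 'second'))
    pq.put((1, 'first'))
    assert pq.get() == (1, 'first')

    # LifoQueue behaves like a stack: the most recent put() comes out first.
    lq = LifoQueue()
    lq.put('a')
    lq.put('b')
    assert lq.get() == 'b'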
| lgpl-3.0 |
schwehr/gdal-autotest2 | python/ogr/georss_test.py | 1 | 15293 | # MOE:insert #!/usr/bin/env python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Test OGR handling of GeoRSS files.
This is a rewrite of:
https://trac.osgeo.org/gdal/browser/trunk/autotest/ogr/ogr_georss.py
"""
import json
import os
import sys
import unittest
import google3
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
from autotest2.gcore import gcore_util
from autotest2.ogr import ogr_util
DRIVER = ogr_util.GEORSS_DRIVER
EXT = '.xml'
DEFAULT_LAYER_NAME = 'OGRGeoRSS'
# Values used in some of the atom tests.
ATOM_FIELD_VALUES = [
('title', 'Atom draft-07 snapshot',
ogr.OFTString), ('link_rel', 'alternate',
ogr.OFTString), ('link_type', 'text/html', ogr.OFTString),
('link_href', 'http://example.org/2005/04/02/atom',
ogr.OFTString), ('link2_rel', 'enclosure',
ogr.OFTString), ('link2_type', 'audio/mpeg',
ogr.OFTString), ('link2_length', '1337',
ogr.OFTInteger),
('link2_href', 'http://example.org/audio/ph34r_my_podcast.mp3',
ogr.OFTString), ('id', 'tag:example.org,2003:3.2397',
ogr.OFTString), ('updated', '2005/07/31 12:29:29+00',
ogr.OFTDateTime),
('published', '2003/12/13 08:29:29-04',
ogr.OFTDateTime), ('author_name', 'Mark Pilgrim',
ogr.OFTString), ('author_uri', 'http://example.org/',
ogr.OFTString),
('author_email', '[email protected]',
ogr.OFTString), ('contributor_name', 'Sam Ruby',
ogr.OFTString), ('contributor2_name', 'Joe Gregorio',
ogr.OFTString), ('content_type', 'xhtml',
ogr.OFTString),
('content_xml_lang', 'en',
ogr.OFTString), ('content_xml_base', 'http://diveintomark.org/',
ogr.OFTString)
]
def setUpModule():
ogr_util.SetupTestEnv()
def CreateField(layer, name, field_type=ogr.OFTString):
field_definition = ogr.FieldDefn(name, field_type)
layer.CreateField(field_definition)
field_definition.Destroy()
@ogr_util.SkipIfDriverMissing(DRIVER)
class OgrGeoRSSTest(ogr_util.DriverTestCase):
def setUp(self):
super(OgrGeoRSSTest, self).setUp(DRIVER, EXT)
# Helper for GeoRSS tests. Used by GeoRss1x.
def ogrGeoRssTestAtom(self, ogr_filepath):
ds = self.CheckOpen(ogr_filepath)
lyr = ds.GetLayerByIndex(0)
self.assertIsNone(lyr.GetSpatialRef())
feat = lyr.GetNextFeature()
for field_value in ATOM_FIELD_VALUES:
self.assertEquals(feat.GetFieldAsString(field_value[0]), field_value[1])
self.assertIn('<div xmlns="http://www.w3.org/1999/xhtml">',
feat.GetFieldAsString('content'))
# Helper for GeoRSS tests. Used by GeoRss2~9.
def ogrGeoRssTest(self, ogr_filepath, only_first_feature):
ds = self.CheckOpen(ogr_filepath)
lyr = ds.GetLayerByIndex(0)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
self.assertIsNotNone(lyr.GetSpatialRef())
self.assertTrue(lyr.GetSpatialRef().IsSame(srs))
self.assertNotIn('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]',
lyr.GetSpatialRef().ExportToWkt())
feat = lyr.GetNextFeature()
expected_wkt = 'POINT (2 49)'
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A point')
self.assertEquals(feat.GetFieldAsString('author'), 'Author')
self.assertEquals(feat.GetFieldAsString('link'), 'http://gdal.org')
self.assertEquals(
feat.GetFieldAsString('pubDate'), '2008/12/07 20:13:00+02')
self.assertEquals(feat.GetFieldAsString('category'), 'First category')
self.assertEquals(feat.GetFieldAsString('category_domain'), 'first_domain')
self.assertEquals(feat.GetFieldAsString('category2'), 'Second category')
self.assertEquals(
feat.GetFieldAsString('category2_domain'), 'second_domain')
feat = lyr.GetNextFeature()
expected_wkt = 'LINESTRING (2 48,2.1 48.1,2.2 48.0)'
if only_first_feature is False:
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A line')
feat = lyr.GetNextFeature()
expected_wkt = 'POLYGON ((2 50,2.1 50.1,2.2 48.1,2.1 46.1,2 50))'
if only_first_feature is False:
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A polygon')
feat = lyr.GetNextFeature()
expected_wkt = 'POLYGON ((2 49,2.0 49.5,2.2 49.5,2.2 49.0,2 49))'
if only_first_feature is False:
self.assertEquals(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
self.assertEquals(feat.GetFieldAsString('title'), 'A box')
# Creates a RSS 2.0 document
def ogrGeoRssCreate(self, ogr_filepath, options):
ds = self.driver.CreateDataSource(ogr_filepath, options=options)
lyr = ds.CreateLayer('georss')
lyr.CreateField(ogr.FieldDefn('title', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('author', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('link', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('pubDate', ogr.OFTDateTime))
lyr.CreateField(ogr.FieldDefn('description', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category_domain', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category2', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('category2_domain', ogr.OFTString))
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A point')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetField('category', 'First category')
dst_feat.SetField('category_domain', 'first_domain')
dst_feat.SetField('category2', 'Second category')
dst_feat.SetField('category2_domain', 'second_domain')
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A line')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetGeometry(
ogr.CreateGeometryFromWkt('LINESTRING (2 48,2.1 48.1,2.2 48.0)'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A polygon')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetGeometry(
ogr.CreateGeometryFromWkt(
'POLYGON ((2 50,2.1 50.1,2.2 48.1,2.1 46.1,2 50))'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('title', 'A box')
dst_feat.SetField('author', 'Author')
dst_feat.SetField('link', 'http://gdal.org')
dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
dst_feat.SetGeometry(
ogr.CreateGeometryFromWkt(
'POLYGON ((2 49,2.0 49.5,2.2 49.5,2.2 49.0,2 49))'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
ds = None
def testOgrGeorss1(self):
filepath = ogr_util.GetTestFilePath('georss/atom_rfc_sample.xml')
self.ogrGeoRssTestAtom(filepath)
def testOgrGeorss1AtomNs(self):
filepath = ogr_util.GetTestFilePath('georss/atom_rfc_sample_atom_ns.xml')
self.ogrGeoRssTestAtom(filepath)
def testOgrGeorss1bis(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test_atom.xml')
ds = self.driver.CreateDataSource(filepath, options=['FORMAT=ATOM'])
lyr = ds.CreateLayer('georss')
for field_value in ATOM_FIELD_VALUES:
lyr.CreateField(ogr.FieldDefn(field_value[0], field_value[2]))
lyr.CreateField(ogr.FieldDefn('content', ogr.OFTString))
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
for field_value in ATOM_FIELD_VALUES:
dst_feat.SetField(field_value[0], field_value[1])
dst_feat.SetField(
'content', '<div xmlns="http://www.w3.org/1999/xhtml">'
'<p><i>[Update: The Atom draft is finished.]</i></p></div>')
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
def testOgrGeorss1ter(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test_atom.xml')
self.ogrGeoRssTestAtom(filepath)
# Test reading a RSS 2.0 document with GeoRSS simple geometries
def testOgrGeorss2(self):
filepath = ogr_util.GetTestFilePath('georss/test_georss_simple.xml')
self.ogrGeoRssTest(filepath, False)
# Test reading a RSS 2.0 document with GeoRSS GML geometries
def testOgrGeorss3(self):
filepath = ogr_util.GetTestFilePath('georss/test_georss_gml.xml')
self.ogrGeoRssTest(filepath, False)
# Test writing a RSS 2.0 document in Simple dialect
# (doesn't need read support)
def testOgrGeorss4and5(self):
filepath = ogr_util.GetTestFilePath('/vsimem/ogr_georss_4.xml')
with gcore_util.GdalUnlinkWhenDone(filepath):
self.ogrGeoRssCreate(filepath, [])
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 5.
self.ogrGeoRssTest(filepath, False)
# Test writing a RSS 2.0 document in GML dialect
# (doesn't need read support)
def testOgrGeorss6and7(self):
filepath = ogr_util.GetTestFilePath('/vsimem/ogr_georss_6.xml')
with gcore_util.GdalUnlinkWhenDone(filepath):
self.ogrGeoRssCreate(filepath, ['GEOM_DIALECT=GML'])
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 7.
self.ogrGeoRssTest(filepath, False)
# Test writing a RSS 2.0 document in W3C Geo dialect
# (doesn't need read support)
def testOgrGeorss8and9(self):
filepath = ogr_util.GetTestFilePath('/vsimem/ogr_georss_8.xml')
with gcore_util.GdalUnlinkWhenDone(filepath):
self.ogrGeoRssCreate(filepath, ['GEOM_DIALECT=W3C_GEO'])
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 9.
self.ogrGeoRssTest(filepath, True)
# Test writing a RSS 2.0 document in GML dialect with EPSG:32631
def testOgrGeorss10and11(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test32631.rss')
with gcore_util.GdalUnlinkWhenDone(filepath):
srs = osr.SpatialReference()
srs.ImportFromEPSG(32631)
ds = self.driver.CreateDataSource(filepath)
with gcore_util.GdalUnlinkWhenDone(filepath):
with gcore_util.ErrorHandler('CPLQuietErrorHandler'):
lyr = ds.CreateLayer('georss', srs=srs)
self.assertIsNone(lyr)
ds = self.driver.CreateDataSource(filepath, options=['GEOM_DIALECT=GML'])
lyr = ds.CreateLayer('georss', srs=srs)
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (500000 4000000)'))
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
# Close the files and force a flush to the filesystem.
lyr = None
ds = None
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 11.
ds = self.CheckOpen(filepath)
lyr = ds.GetLayer(0)
srs = osr.SpatialReference()
srs.ImportFromEPSG(32631)
self.assertIsNotNone(lyr.GetSpatialRef())
self.assertTrue(lyr.GetSpatialRef().IsSame(srs))
self.assertIn('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]',
lyr.GetSpatialRef().ExportToWkt())
feat = lyr.GetNextFeature()
expected_wkt = 'POINT (500000 4000000)'
self.assertEqual(feat.GetGeometryRef().ExportToWkt(), expected_wkt)
# TODO(b/71817518): ogr_georss_12
def testOgrGeorss13and14(self):
filepath = ogr_util.GetTestFilePath('/vsimem/test32631.rss')
with gcore_util.GdalUnlinkWhenDone(filepath):
ds = self.driver.CreateDataSource(
filepath, options=['USE_EXTENSIONS=YES'])
lyr = ds.CreateLayer('georss')
lyr.CreateField(ogr.FieldDefn('myns_field', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('field2', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('ogr_field3', ogr.OFTString))
dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
dst_feat.SetField('myns_field', 'val')
dst_feat.SetField('field2', 'val2')
dst_feat.SetField('ogr_field3', 'val3')
self.assertEqual(lyr.CreateFeature(dst_feat), 0)
ds = None
src = self.CheckOpen(filepath)
lyr = src.GetLayerByName('georss')
self.assertIsNotNone(lyr)
# Portion that was in 14.
ds = self.CheckOpen(filepath)
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
self.assertEquals(feat.GetFieldAsString('myns_field'), 'val')
self.assertEquals(feat.GetFieldAsString('ogr_field2'), 'val2')
self.assertEquals(feat.GetFieldAsString('ogr_field3'), 'val3')
# ogr_georss_15 redundant as all temp files were tested with in memory file.
if __name__ == '__main__':
unittest.main()
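# Illustrative invocation (assumes the google3/autotest2 environment that the
# imports above rely on is available); unittest.main() accepts a single test:
#   python georss_test.py OgrGeoRSSTest.testOgrGeorss2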
| apache-2.0 |