repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
SlimSaber/kernel_oneplus_msm8974 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by comm/pid.
# If a [comm] or [pid] arg is specified, only syscalls for that comm or pid
# are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
brentdax/swift | utils/gyb_syntax_support/CommonNodes.py | 11 | 1985 | from Child import Child
from Node import Node # noqa: I201
COMMON_NODES = [
Node('Decl', kind='Syntax'),
Node('Expr', kind='Syntax'),
Node('Stmt', kind='Syntax'),
Node('Type', kind='Syntax'),
Node('Pattern', kind='Syntax'),
Node('UnknownDecl', kind='Decl'),
Node('UnknownExpr', kind='Expr'),
Node('UnknownStmt', kind='Stmt'),
Node('UnknownType', kind='Type'),
Node('UnknownPattern', kind='Pattern'),
# code-block-item = (decl | stmt | expr) ';'?
Node('CodeBlockItem', kind='Syntax', omit_when_empty=True,
description="""
A CodeBlockItem is any Syntax node that appears on its own line inside
a CodeBlock.
""",
children=[
Child('Item', kind='Syntax',
description="The underlying node inside the code block.",
node_choices=[
Child('Decl', kind='Decl'),
Child('Stmt', kind='Stmt'),
Child('Expr', kind='Expr'),
Child('TokenList', kind='TokenList'),
Child('NonEmptyTokenList', kind='NonEmptyTokenList'),
]),
Child('Semicolon', kind='SemicolonToken',
description="""
If present, the trailing semicolon at the end of the item.
""",
is_optional=True),
Child('ErrorTokens', kind='Syntax', is_optional=True),
]),
# code-block-item-list -> code-block-item code-block-item-list?
Node('CodeBlockItemList', kind='SyntaxCollection',
element='CodeBlockItem'),
# code-block -> '{' stmt-list '}'
Node('CodeBlock', kind='Syntax',
traits=['Braced', 'WithStatements'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('Statements', kind='CodeBlockItemList'),
Child('RightBrace', kind='RightBraceToken'),
]),
]
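# Illustrative example (hypothetical Swift source): in `let x = 1; print(x)`,
# both `let x = 1` and `print(x)` become CodeBlockItem nodes, and the first
# item's Semicolon child holds the trailing `;` token.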
| apache-2.0 |
abrt/faf | src/pyfaf/storage/migrations/versions/1c7edfbf8941_drop_reportunknownpackage_running_fields.py | 1 | 3689 | # Copyright (C) 2015 ABRT Team
# Copyright (C) 2015 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
"""
Drop the reportunknownpackage running_* fields and remove the installed_ prefix
Revision ID: 1c7edfbf8941
Revises: 43bd2d59838e
Create Date: 2015-03-18 15:19:28.412310
"""
from alembic.op import (create_foreign_key, create_unique_constraint, execute,
drop_constraint, drop_column, alter_column, add_column)
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1c7edfbf8941'
down_revision = '43bd2d59838e'
t = "reportunknownpackages"
def upgrade() -> None:
# constraint name is autogenerated differently between pg versions
try:
drop_constraint("reportunknownpackages_report_id_key", t)
except: # pylint: disable=bare-except
execute('ROLLBACK')
drop_constraint(
"reportunknownpackages_report_id_type_name_installed_epoch_i_key",
t)
drop_constraint("reportunknownpackages_installed_arch_id_fkey", t)
drop_column(t, "running_epoch")
drop_column(t, "running_version")
drop_column(t, "running_release")
drop_column(t, "running_arch_id")
alter_column(t, "installed_epoch", new_column_name="epoch")
alter_column(t, "installed_version", new_column_name="version")
alter_column(t, "installed_release", new_column_name="release")
alter_column(t, "installed_arch_id", new_column_name="arch_id")
create_foreign_key("reportunknownpackages_arch_id_fkey", t,
"archs", ["arch_id"], ["id"])
fields = ["report_id", "type", "name", "epoch",
"version", "release", "arch_id"]
create_unique_constraint("reportunknownpackages_report_id_key", t, fields)
def downgrade() -> None:
drop_constraint("reportunknownpackages_report_id_key", t)
drop_constraint("reportunknownpackages_arch_id_fkey", t)
add_column(t, sa.Column('running_epoch', sa.Integer(),
nullable=True))
add_column(t, sa.Column('running_version', sa.String(64),
nullable=True))
add_column(t, sa.Column('running_release', sa.String(64),
nullable=True))
add_column(t, sa.Column('running_arch_id', sa.Integer(),
sa.ForeignKey('archs.id'),
nullable=True))
alter_column(t, "epoch", new_column_name="installed_epoch")
alter_column(t, "version", new_column_name="installed_version")
alter_column(t, "release", new_column_name="installed_release")
alter_column(t, "arch_id", new_column_name="installed_arch_id")
fields = ["report_id", "type", "name", "installed_epoch",
"installed_version", "installed_release", "installed_arch_id",
"running_epoch", "running_version", "running_release",
"running_arch_id"]
create_unique_constraint("reportunknownpackages_report_id_key", t, fields)
create_foreign_key("reportunknownpackages_installed_arch_id_fkey", t,
"archs", ["installed_arch_id"], ["id"])
| gpl-3.0 |
izonder/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_unicode.py | 177 | 1269 | r"""Fixer for unicode.
* Changes unicode to str and unichr to chr.
* If "...\u..." is not unicode literal change it into "...\\u...".
* Change u"..." into "...".
"""
from ..pgen2 import token
from .. import fixer_base
_mapping = {u"unichr" : u"chr", u"unicode" : u"str"}
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "STRING | 'unicode' | 'unichr'"
def start_tree(self, tree, filename):
super(FixUnicode, self).start_tree(tree, filename)
self.unicode_literals = 'unicode_literals' in tree.future_features
def transform(self, node, results):
if node.type == token.NAME:
new = node.clone()
new.value = _mapping[node.value]
return new
elif node.type == token.STRING:
val = node.value
if not self.unicode_literals and val[0] in u'\'"' and u'\\' in val:
val = ur'\\'.join([
v.replace(u'\\u', ur'\\u').replace(u'\\U', ur'\\U')
for v in val.split(ur'\\')
])
if val[0] in u'uU':
val = val[1:]
if val == node.value:
return node
new = node.clone()
new.value = val
return new
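# Illustrative transformations performed by this fixer (assuming the source
# file does not enable `unicode_literals`):
#   unicode(x) -> str(x)
#   unichr(65) -> chr(65)
#   u"hello"   -> "hello"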
| apache-2.0 |
pombredanne/SourceForge-Allura | ForgeTracker/forgetracker/widgets/ticket_search.py | 2 | 5248 | import tg
from pylons import c
import ew as ew_core
import ew.jinja2_ew as ew
from allura.lib.widgets import form_fields as ffw
class TicketSearchResults(ew_core.SimpleForm):
template='jinja:forgetracker:templates/tracker_widgets/ticket_search_results.html'
defaults=dict(
ew_core.SimpleForm.defaults,
solr_error=None,
count=None,
limit=None,
query=None,
tickets=None,
sortable_custom_fields=None,
page=1,
sort=None,
columns=None)
class fields(ew_core.NameList):
page_list=ffw.PageList()
page_size=ffw.PageSize()
lightbox=ffw.Lightbox(name='col_list',trigger='#col_menu')
def resources(self):
yield ew.JSLink('tracker_js/ticket-list.js')
yield ew.CSSLink('tracker_css/ticket-list.css')
for r in super(TicketSearchResults, self).resources():
yield r
class MassEdit(ew_core.SimpleForm):
template='jinja:forgetracker:templates/tracker_widgets/mass_edit.html'
defaults=dict(
ew_core.SimpleForm.defaults,
count=None,
limit=None,
query=None,
tickets=None,
page=1,
sort=None)
class fields(ew_core.NameList):
page_list=ffw.PageList()
page_size=ffw.PageSize()
lightbox=ffw.Lightbox(name='col_list',trigger='#col_menu')
def resources(self):
yield ew.JSLink('tracker_js/ticket-list.js')
yield ew.CSSLink('tracker_css/ticket-list.css')
for r in super(MassEdit, self).resources():
yield r
class MassEditForm(ew_core.Widget):
template='jinja:forgetracker:templates/tracker_widgets/mass_edit_form.html'
defaults=dict(
ew_core.Widget.defaults,
globals=None,
query=None,
cancel_href=None,
limit=None,
sort=None)
def resources(self):
yield ew.JSLink('tracker_js/mass-edit.js')
class SearchHelp(ffw.Lightbox):
defaults=dict(
ffw.Lightbox.defaults,
name='search_help_modal',
trigger='a.search_help_modal',
content="""<div style="height:400px; overflow:auto;"><h1>Searching for tickets</h1>
<p>Searches use <a href="http://www.solrtutorial.com/solr-query-syntax.html" target="_blank">solr lucene query syntax</a>. Use the following fields in tracker ticket searches:</p>
<ul>
<li>User who owns the ticket - assigned_to_s</li>
<li>Labels assigned to the ticket - labels</li>
<li>Milestone the ticket is assigned to - _milestone</li>
<li>Last modified date - mod_date_dt</li>
<li>Body of the ticket - text</li>
<li>Number of ticket - ticket_num</li>
<li>User who created the ticket - reported_by_s</li>
<li>Status of the ticket - status</li>
<li>Title of the ticket - summary</li>
<li>Votes up/down of the ticket - votes_up_i/votes_down_i (if enabled in tool options)</li>
<li>Votes total of the ticket - votes_total_i</li>
<li>Custom field - the field name with an underscore in front, like _custom</li>
</ul>
<h2>Example searches</h2>
<p>Any ticket that is not closed in the 1.0 milestone with "foo" in the title</p>
<div class="codehilite"><pre>!status:closed AND summary:foo* AND _milestone:1.0</pre></div>
<p>Tickets with the label "foo" but not the label "bar":</p>
<div class="codehilite"><pre>labels:foo AND -labels:bar</pre></div>
<p>Tickets assigned to or added by a user with the username "admin1" and the custom field "size" set to 2</p>
<div class="codehilite"><pre>(assigned_to_s:admin1 or reported_by_s:admin1) AND _size:2</pre></div>
<p>The ticket has "foo" as the title or the body with a number lower than 50</p>
<div class="codehilite"><pre>(summary:foo or text:foo) AND ticket_num:[* TO 50]</pre></div>
<p>Tickets last modified in April 2012</p>
<div class="codehilite"><pre>mod_date_dt:[2012-04-01T00:00:00Z TO 2012-04-30T23:59:59Z]</pre></div>
<h2>Saving searches</h2>
<p>Ticket searches may be saved for later use by project administrators. To save a search, click "Edit Searches" in the tracker sidebar. Click "Add Bin" then enter a summary and search terms for the saved search. Your search will now show up in the sidebar under "Searches" with a count of how many tickets match the query.</p>
<h2>Sorting search results</h2>
<p>Ticket search results can be sorted by clicking the header of the column you want to sort by. The first click will sort the results in ascending order. Clicking the header again will sort the column in descending order. In addition to sorting by the column headers, you can manually sort on these properties:</p>
<ul>
<li>Labels assigned to the ticket - labels_s</li>
<li>Milestone the ticket is assigned to - _milestone_s</li>
<li>Last modified date - mod_date_dt</li>
<li>Body of the ticket - text_s</li>
<li>Number of ticket - ticket_num_i</li>
<li>User who created the ticket - reported_by_s</li>
<li>Status of the ticket - status_s</li>
<li>Title of the ticket - snippet_s</li>
<li>Custom field - the field name with an _ in front and _s at the end like _custom_s</li>
</ul>
<p>You can use these properties by appending them to the url (only one sort allowed at a time) like this:</p>
<div class="codehilite"><pre>/p/yourproject/tickets/search/?q=_milestone:1.0&sort=snippet_s+asc</pre></div></div>
""")
| apache-2.0 |
rpadilha/rvpsite | rvpsite/blog/migrations/0001_initial.py | 1 | 2442 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-15 16:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import s3direct.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blogs',
fields=[
('title', models.CharField(max_length=50, verbose_name='TÍTULO')),
('slug', models.SlugField(primary_key=True, serialize=False, verbose_name='SLUG')),
('category', models.CharField(choices=[('catalogos', 'CATÁLOGOS'), ('eventos', 'EVENTOS'), ('novidades', 'NOVIDADES'), ('promocoes', 'PROMOÇÕES'), ('outros', 'OUTROS')], max_length=15, verbose_name='CATEGORIA')),
('publish', models.BooleanField(default=False, verbose_name='PUBLICAR?')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='CRIADO EM')),
],
options={
'verbose_name': 'CADASTRO DE NOTÍCIA',
'verbose_name_plural': 'CADASTRO DE NOTÍCIAS',
'ordering': ('-created_at',),
},
),
migrations.CreateModel(
name='Contents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_out', models.IntegerField(verbose_name='ORDEM DE POSTAGEM')),
('text', models.TextField(blank=True, max_length=700, verbose_name='TEXTO')),
('picture', s3direct.fields.S3DirectField(blank=True, verbose_name='IMAGEM')),
('inverse', models.BooleanField(default=False, verbose_name='INVERTER ORDEM?')),
('title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Blogs', verbose_name='NOME')),
],
options={
'verbose_name': 'CONTEÚDO DE NOTÍCIA',
'verbose_name_plural': 'CONTEÚDOS DE NOTÍCIA',
'ordering': ('order_out',),
},
),
migrations.AlterUniqueTogether(
name='blogs',
unique_together=set([('slug', 'created_at')]),
),
migrations.AlterUniqueTogether(
name='contents',
unique_together=set([('title', 'order_out')]),
),
]
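# A minimal usage sketch (assuming a standard Django project layout): this
# initial migration is applied with `python manage.py migrate blog`, which
# creates the Blogs and Contents tables and their unique constraints.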
| agpl-3.0 |
pranalik/frappe-bb | frappe/model/db_schema.py | 15 | 12290 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Syncs a database table to the `DocType` (metadata)
.. note:: This module is only used internally
"""
import os
import frappe
from frappe import _
from frappe.utils import cstr, cint
class InvalidColumnName(frappe.ValidationError): pass
type_map = {
'Currency': ('decimal', '18,6')
,'Int': ('int', '11')
,'Float': ('decimal', '18,6')
,'Percent': ('decimal', '18,6')
,'Check': ('int', '1')
,'Small Text': ('text', '')
,'Long Text': ('longtext', '')
,'Code': ('text', '')
,'Text Editor': ('text', '')
,'Date': ('date', '')
,'Datetime': ('datetime', '6')
,'Time': ('time', '6')
,'Text': ('text', '')
,'Data': ('varchar', '255')
,'Link': ('varchar', '255')
,'Dynamic Link':('varchar', '255')
,'Password': ('varchar', '255')
,'Select': ('varchar', '255')
,'Read Only': ('varchar', '255')
,'Attach': ('varchar', '255')
}
default_columns = ['name', 'creation', 'modified', 'modified_by', 'owner', 'docstatus', 'parent',\
'parentfield', 'parenttype', 'idx']
default_shortcuts = ['_Login', '__user', '_Full Name', 'Today', '__today']
# -------------------------------------------------
# Class database table
# -------------------------------------------------
class DbTable:
def __init__(self, doctype, prefix = 'tab'):
self.doctype = doctype
self.name = prefix + doctype
self.columns = {}
self.current_columns = {}
# lists for change
self.add_column = []
self.change_type = []
self.add_index = []
self.drop_index = []
self.set_default = []
self.drop_foreign_key = []
# load
self.get_columns_from_docfields()
def create(self):
add_text = ''
# columns
column_defs = self.get_column_definitions()
if column_defs: add_text += ',\n'.join(column_defs) + ',\n'
# index
index_defs = self.get_index_definitions()
if index_defs: add_text += ',\n'.join(index_defs) + ',\n'
# create table
frappe.db.sql("""create table `%s` (
name varchar(255) not null primary key,
creation datetime(6),
modified datetime(6),
modified_by varchar(255),
owner varchar(255),
docstatus int(1) default '0',
parent varchar(255),
parentfield varchar(255),
parenttype varchar(255),
idx int(8),
%sindex parent(parent))
ENGINE=InnoDB
CHARACTER SET=utf8""" % (self.name, add_text))
def get_columns_from_docfields(self):
"""
get columns from docfields and custom fields
"""
fl = frappe.db.sql("SELECT * FROM tabDocField WHERE parent = %s", self.doctype, as_dict = 1)
precisions = {}
if not frappe.flags.in_install_app:
custom_fl = frappe.db.sql("""\
SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", (self.doctype,), as_dict=1)
if custom_fl: fl += custom_fl
# get precision from property setters
for ps in frappe.get_all("Property Setter", fields=["field_name", "value"],
filters={"doc_type": self.doctype, "doctype_or_field": "DocField", "property": "precision"}):
precisions[ps.field_name] = ps.value
for f in fl:
self.columns[f['fieldname']] = DbColumn(self, f['fieldname'],
f['fieldtype'], f.get('length'), f.get('default'), f.get('search_index'),
f.get('options'), precisions.get(f['fieldname']) or f.get('precision'))
def get_columns_from_db(self):
self.show_columns = frappe.db.sql("desc `%s`" % self.name)
for c in self.show_columns:
self.current_columns[c[0]] = {'name': c[0], 'type':c[1], 'index':c[3], 'default':c[4]}
def get_column_definitions(self):
column_list = [] + default_columns
ret = []
for k in self.columns.keys():
if k not in column_list:
d = self.columns[k].get_definition()
if d:
ret.append('`'+ k+ '` ' + d)
column_list.append(k)
return ret
def get_index_definitions(self):
ret = []
for key, col in self.columns.items():
if col.set_index and col.fieldtype in type_map and \
type_map.get(col.fieldtype)[0] not in ('text', 'longtext'):
ret.append('index `' + key + '`(`' + key + '`)')
return ret
# GET foreign keys
def get_foreign_keys(self):
fk_list = []
txt = frappe.db.sql("show create table `%s`" % self.name)[0][1]
for line in txt.split('\n'):
if line.strip().startswith('CONSTRAINT') and line.find('FOREIGN')!=-1:
try:
fk_list.append((line.split('`')[3], line.split('`')[1]))
except IndexError:
pass
return fk_list
# Drop foreign keys
def drop_foreign_keys(self):
if not self.drop_foreign_key:
return
fk_list = self.get_foreign_keys()
# make dictionary of constraint names
fk_dict = {}
for f in fk_list:
fk_dict[f[0]] = f[1]
# drop
for col in self.drop_foreign_key:
frappe.db.sql("set foreign_key_checks=0")
frappe.db.sql("alter table `%s` drop foreign key `%s`" % (self.name, fk_dict[col.fieldname]))
frappe.db.sql("set foreign_key_checks=1")
def sync(self):
if self.name not in DbManager(frappe.db).get_tables_list(frappe.db.cur_db_name):
self.create()
else:
self.alter()
def alter(self):
self.get_columns_from_db()
for col in self.columns.values():
col.check(self.current_columns.get(col.fieldname, None))
query = []
for col in self.add_column:
query.append("add column `{}` {}".format(col.fieldname, col.get_definition()))
for col in self.change_type:
query.append("change `{}` `{}` {}".format(col.fieldname, col.fieldname, col.get_definition()))
for col in self.add_index:
# if index key not exists
if not frappe.db.sql("show index from `%s` where key_name = %s" %
(self.name, '%s'), col.fieldname):
query.append("add index `{}`(`{}`)".format(col.fieldname, col.fieldname))
for col in self.drop_index:
if col.fieldname != 'name': # primary key
# if index key exists
if frappe.db.sql("show index from `%s` where key_name = %s" %
(self.name, '%s'), col.fieldname):
query.append("drop index `{}`".format(col.fieldname))
for col in list(set(self.set_default).difference(set(self.change_type))):
if col.fieldname=="name":
continue
if not col.default:
col_default = "null"
else:
col_default = '"{}"'.format(col.default.replace('"', '\\"'))
query.append('alter column `{}` set default {}'.format(col.fieldname, col_default))
if query:
frappe.db.sql("alter table `{}` {}".format(self.name, ", ".join(query)))
class DbColumn:
def __init__(self, table, fieldname, fieldtype, length, default, set_index, options, precision):
self.table = table
self.fieldname = fieldname
self.fieldtype = fieldtype
self.length = length
self.set_index = set_index
self.default = default
self.options = options
self.precision = precision
def get_definition(self, with_default=1):
ret = get_definition(self.fieldtype, self.precision)
if with_default and self.default and (self.default not in default_shortcuts) \
and not self.default.startswith(":") and ret not in ['text', 'longtext']:
ret += ' default "' + self.default.replace('"', '\"') + '"'
return ret
def check(self, current_def):
column_def = self.get_definition(0)
# no columns
if not column_def:
return
# to add?
if not current_def:
self.fieldname = validate_column_name(self.fieldname)
self.table.add_column.append(self)
return
# type
if current_def['type'] != column_def:
self.table.change_type.append(self)
# index
else:
if (current_def['index'] and not self.set_index):
self.table.drop_index.append(self)
if (not current_def['index'] and self.set_index and not (column_def in ['text', 'longtext'])):
self.table.add_index.append(self)
# default
if (self.default_changed(current_def) and (self.default not in default_shortcuts) and not cstr(self.default).startswith(":") and not (column_def in ['text','longtext'])):
self.table.set_default.append(self)
def default_changed(self, current_def):
if "decimal" in current_def['type']:
try:
return float(current_def['default'])!=float(self.default)
except TypeError:
return True
else:
return current_def['default'] != self.default
class DbManager:
"""
Basically, a wrapper for oft-used MySQL commands like SHOW TABLES, SHOW DATABASES, SHOW VARIABLES, etc.
#TODO:
0. Simplify / create settings for the restore database source folder
0a. Merge restore database and extract_sql(from frappe_server_tools).
1. Setter and getter for different mysql variables.
2. Setter and getter for mysql variables at global level??
"""
def __init__(self,db):
"""
Pass root_conn here for access to all databases.
"""
if db:
self.db = db
def get_variables(self,regex):
"""
Get variables that match the passed LIKE pattern.
"""
return list(self.db.sql("SHOW VARIABLES LIKE '%s'"%regex))
def get_table_schema(self,table):
"""
Just returns the output of Desc tables.
"""
return list(self.db.sql("DESC `%s`"%table))
def get_tables_list(self,target=None):
"""get list of tables"""
if target:
self.db.use(target)
return [t[0] for t in self.db.sql("SHOW TABLES")]
def create_user(self, user, password, host):
# Create the user if it doesn't exist.
try:
if password:
self.db.sql("CREATE USER '%s'@'%s' IDENTIFIED BY '%s';" % (user[:16], host, password))
else:
self.db.sql("CREATE USER '%s'@'%s';" % (user[:16], host))
except Exception:
raise
def delete_user(self, target, host):
# delete user if exists
try:
self.db.sql("DROP USER '%s'@'%s';" % (target, host))
except Exception, e:
if e.args[0]==1396:
pass
else:
raise
def create_database(self,target):
if target in self.get_database_list():
self.drop_database(target)
self.db.sql("CREATE DATABASE IF NOT EXISTS `%s` ;" % target)
def drop_database(self,target):
self.db.sql("DROP DATABASE IF EXISTS `%s`;"%target)
def grant_all_privileges(self, target, user, host):
self.db.sql("GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@'%s';" % (target, user, host))
def grant_select_privilges(self, db, table, user, host):
if table:
self.db.sql("GRANT SELECT ON %s.%s to '%s'@'%s';" % (db, table, user, host))
else:
self.db.sql("GRANT SELECT ON %s.* to '%s'@'%s';" % (db, user, host))
def flush_privileges(self):
self.db.sql("FLUSH PRIVILEGES")
def get_database_list(self):
"""get list of databases"""
return [d[0] for d in self.db.sql("SHOW DATABASES")]
def restore_database(self,target,source,user,password):
from frappe.utils import make_esc
esc = make_esc('$ ')
os.system("mysql -u %s -p%s -h%s %s < %s" % \
(esc(user), esc(password), esc(frappe.db.host), esc(target), source))
def drop_table(self,table_name):
"""drop table if exists"""
if table_name not in self.get_tables_list():
return
self.db.sql("DROP TABLE IF EXISTS %s "%(table_name))
def validate_column_name(n):
n = n.replace(' ','_').strip().lower()
import re
if re.search(r"[\W]", n):
frappe.throw(_("Fieldname {0} can only contain letters, numbers and underscores").format(n), InvalidColumnName)
return n
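# For example, validate_column_name("My Field") returns "my_field", while a
# name with other special characters (e.g. "a-b") raises InvalidColumnName.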
def updatedb(dt):
"""
Syncs a `DocType` to the table
* creates if required
* updates columns
* updates indices
"""
res = frappe.db.sql("select ifnull(issingle, 0) from tabDocType where name=%s", (dt,))
if not res:
raise Exception, 'Wrong doctype "%s" in updatedb' % dt
if not res[0][0]:
frappe.db.commit()
tab = DbTable(dt, 'tab')
tab.sync()
frappe.db.begin()
def remove_all_foreign_keys():
frappe.db.sql("set foreign_key_checks = 0")
frappe.db.commit()
for t in frappe.db.sql("select name from tabDocType where ifnull(issingle,0)=0"):
dbtab = DbTable(t[0])
try:
fklist = dbtab.get_foreign_keys()
except Exception, e:
if e.args[0]==1146:
fklist = []
else:
raise
for f in fklist:
frappe.db.sql("alter table `tab%s` drop foreign key `%s`" % (t[0], f[1]))
def get_definition(fieldtype, precision=None):
d = type_map.get(fieldtype)
if not d:
return
ret = d[0]
if d[1]:
length = d[1]
if fieldtype in ["Float", "Currency", "Percent"] and cint(precision) > 6:
length = '18,9'
ret += '(' + length + ')'
return ret
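# For example, get_definition("Currency") returns "decimal(18,6)", while
# get_definition("Float", precision=9) widens the column to "decimal(18,9)"
# and get_definition("Text") returns plain "text" with no length.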
def add_column(doctype, column_name, fieldtype, precision=None):
frappe.db.commit()
frappe.db.sql("alter table `tab%s` add column %s %s" % (doctype,
column_name, get_definition(fieldtype, precision)))
| mit |
JonasSC/SuMPF | tests/tests/_internal/test_interpolation.py | 1 | 11458 | # This file is a part of the "SuMPF" package
# Copyright (C) 2018-2021 Jonas Schulte-Coerne
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests the interpolation functions"""
import hypothesis.extra.numpy
import numpy
import pytest
import sumpf._internal as sumpf_internal
def xs_ys(data, interpolation):
"""A helper function, that creates arrays of x and y values from the data pairs,
that have been created by hypothesis.
"""
if data:
xs, ys = map(numpy.array, zip(*sorted(data)))
else:
xs = numpy.empty(0)
ys = numpy.empty(0)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG):
if (xs <= 0).any():
xs -= xs.min()
xs += 1e-15
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_Y):
ys = numpy.abs(ys) + 1e-15
return xs, ys
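# Illustrative behavior with hypothetical data: for a linear interpolation,
# xs_ys([(2.0, 1 + 1j), (1.0, 3 + 0j)], sumpf_internal.Interpolations.LINEAR)
# returns the arrays sorted by x: (array([1., 2.]), array([3.+0.j, 1.+1.j])).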
@hypothesis.given(interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
data=hypothesis.strategies.lists(elements=hypothesis.strategies.tuples(hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), # pylint: disable=line-too-long
hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
min_size=0, max_size=2 ** 12,
unique_by=lambda t: t[0]))
def test_supporting_points(interpolation, data):
"""Tests if the interpolation at a supporting point is exactly the given y value"""
func = sumpf_internal.interpolation.get(interpolation)
xs, ys = xs_ys(data, interpolation)
assert (func(xs, xs, ys) == ys).all()
@hypothesis.given(interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
data=hypothesis.strategies.lists(elements=hypothesis.strategies.tuples(hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), # pylint: disable=line-too-long
hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
min_size=1, max_size=2 ** 12,
unique_by=lambda t: t[0]),
x=hypothesis.strategies.lists(elements=hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), min_size=0, max_size=2 ** 12)) # pylint: disable=line-too-long
def test_x_as_scalar_and_vector(interpolation, data, x):
"""Tests if running a vectorized interpolation returns the same result as the scalar version."""
func = sumpf_internal.interpolation.get(interpolation)
xs, ys = xs_ys(data, interpolation)
x = numpy.array(x)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG):
if (x <= 0).any():
x -= x.min()
x += 1e-15
scalar = [func(s, xs, ys) for s in x]
vector = list(func(x, xs, ys))
assert scalar == pytest.approx(vector, nan_ok=True)
@pytest.mark.filterwarnings("ignore:divide by zero")
@hypothesis.given(interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
xs=hypothesis.extra.numpy.arrays(dtype=numpy.float64, shape=2, elements=hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), unique=True), # pylint: disable=line-too-long
ys=hypothesis.extra.numpy.arrays(dtype=numpy.complex128, shape=2, elements=hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
k=hypothesis.strategies.floats(min_value=1e-15, max_value=1.0 - 1e-15))
def test_interpolation(interpolation, xs, ys, k): # noqa: C901; the function is not complex, it's just a long switch case
# pylint: disable=too-many-branches
"""Tests the computation of an interpolated value."""
func = sumpf_internal.interpolation.get(interpolation)
xs = numpy.array(sorted(xs))
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG) and \
min(xs) < 0.0:
xs -= min(xs)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC, sumpf_internal.Interpolations.LOG_Y):
ys = numpy.abs(ys)
x = xs[0] + k * (xs[1] - xs[0])
hypothesis.assume(x not in xs) # due to the limited precision of floating point numbers, this can still happen
if interpolation is sumpf_internal.Interpolations.ZERO:
assert func(x, xs, ys) == 0.0
elif interpolation is sumpf_internal.Interpolations.ONE:
assert func(x, xs, ys) == 1.0
elif interpolation is sumpf_internal.Interpolations.LINEAR:
assert func(x, xs, ys) == pytest.approx(numpy.interp(x, xs, ys))
elif interpolation is sumpf_internal.Interpolations.LOGARITHMIC:
log_xs = numpy.log2(xs)
log_ys = numpy.log(numpy.abs(ys))
assert func(x, xs, ys) == pytest.approx(numpy.exp(numpy.interp(numpy.log2(x), log_xs, log_ys)), nan_ok=True)
elif interpolation is sumpf_internal.Interpolations.LOG_X:
log_xs = numpy.log2(xs)
assert func(x, xs, ys) == pytest.approx(numpy.interp(numpy.log2(x), log_xs, ys))
elif interpolation is sumpf_internal.Interpolations.LOG_Y:
log_ys = numpy.log(numpy.abs(ys))
assert func(x, xs, ys) == pytest.approx(numpy.exp(numpy.interp(x, xs, log_ys)), nan_ok=True)
elif interpolation is sumpf_internal.Interpolations.STAIRS_LIN:
if k < 0.5:
assert func(x, xs, ys) == ys[0]
else:
assert func(x, xs, ys) == ys[1]
elif interpolation is sumpf_internal.Interpolations.STAIRS_LOG:
if numpy.log(x) - numpy.log(xs[0]) < numpy.log(xs[1]) - numpy.log(x):
assert func(x, xs, ys) == ys[0]
else:
assert func(x, xs, ys) == ys[1]
else:
raise ValueError(f"Unknown interpolation: {interpolation}.")
@pytest.mark.filterwarnings("ignore:divide by zero encountered in log", "ignore:invalid value encountered", "ignore:overflow encountered in exp") # pylint: disable=line-too-long
@hypothesis.given(xs=hypothesis.extra.numpy.arrays(dtype=numpy.float64, shape=2, elements=hypothesis.strategies.floats(min_value=0.0, max_value=1e12), unique=True), # pylint: disable=line-too-long
ys=hypothesis.extra.numpy.arrays(dtype=numpy.complex128, shape=2, elements=hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
delta_x=hypothesis.strategies.floats(min_value=1e-15, max_value=1e15))
def test_extrapolation(xs, ys, interpolation, delta_x): # noqa: C901; the function is not complex, it's just a long switch case
# pylint: disable=too-many-branches,too-many-statements
"""Tests the computation of an extrapolated value."""
func = sumpf_internal.interpolation.get(interpolation)
xs = numpy.array(sorted(xs))
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG) and \
min(xs) < 0.0:
xs -= min(xs)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC, sumpf_internal.Interpolations.LOG_Y):
ys = numpy.abs(ys)
x0 = xs[0] * (1.0 - delta_x) - delta_x
x1 = xs[1] * (1.0 + delta_x) + delta_x
if interpolation is sumpf_internal.Interpolations.ZERO:
assert func(x0, xs, ys) == 0.0
assert func(x1, xs, ys) == 0.0
elif interpolation is sumpf_internal.Interpolations.ONE:
assert func(x0, xs, ys) == 1.0
assert func(x1, xs, ys) == 1.0
elif interpolation is sumpf_internal.Interpolations.LINEAR:
m = (ys[1] - ys[0]) / (xs[1] - xs[0])
n0 = ys[0] - m * xs[0]
n1 = ys[1] - m * xs[1]
assert func(x0, xs, ys) == pytest.approx(m * x0 + n0)
assert func(x1, xs, ys) == pytest.approx(m * x1 + n1)
elif interpolation is sumpf_internal.Interpolations.LOGARITHMIC:
if 0.0 in ys:
assert numpy.isnan(func(x0, xs, ys))
assert numpy.isnan(func(x1, xs, ys))
else:
log_xs = numpy.log2(xs)
log_ys = numpy.log2(ys)
m = (log_ys[1] - log_ys[0]) / (log_xs[1] - log_xs[0])
r0 = numpy.exp2(m * numpy.log2(x0) + log_ys[0] - m * log_xs[0])
r1 = numpy.exp2(m * numpy.log2(x1) + log_ys[1] - m * log_xs[1])
assert (numpy.isnan(func(x0, xs, ys)) and numpy.isnan(r0)) or (func(x0, xs, ys) == pytest.approx(r0))
assert (numpy.isnan(func(x1, xs, ys)) and numpy.isnan(r1)) or (func(x1, xs, ys) == pytest.approx(r1))
elif interpolation is sumpf_internal.Interpolations.LOG_X:
log_xs = numpy.log2(xs)
m = (ys[1] - ys[0]) / (log_xs[1] - log_xs[0])
r0 = m * numpy.log2(x0) + ys[0] - m * log_xs[0]
r1 = m * numpy.log2(x1) + ys[1] - m * log_xs[1]
assert (numpy.isnan(func(x0, xs, ys)) and numpy.isnan(r0)) or (func(x0, xs, ys) == pytest.approx(r0))
assert (numpy.isnan(func(x1, xs, ys)) and numpy.isnan(r1)) or (func(x1, xs, ys) == pytest.approx(r1))
elif interpolation is sumpf_internal.Interpolations.LOG_Y:
if 0.0 in ys:
assert numpy.isnan(func(x0, xs, ys))
assert numpy.isnan(func(x1, xs, ys))
else:
log_ys = numpy.log2(ys)
m = (log_ys[1] - log_ys[0]) / (xs[1] - xs[0])
n0 = log_ys[0] - m * xs[0]
n1 = log_ys[1] - m * xs[1]
assert func(x0, xs, ys) == pytest.approx(numpy.exp2(m * x0 + n0))
assert func(x1, xs, ys) == pytest.approx(numpy.exp2(m * x1 + n1))
elif interpolation is sumpf_internal.Interpolations.STAIRS_LIN:
assert func(x0, xs, ys) == ys[0]
assert func(x1, xs, ys) == ys[1]
elif interpolation is sumpf_internal.Interpolations.STAIRS_LOG:
assert func(x0, xs, ys) == ys[0]
assert func(x1, xs, ys) == ys[1]
else:
raise ValueError(f"Unknown interpolation: {interpolation}.")
| lgpl-3.0 |
chudaol/edx-platform | common/lib/capa/capa/tests/response_xml_factory.py | 30 | 34410 | from lxml import etree
from abc import ABCMeta, abstractmethod
class ResponseXMLFactory(object):
""" Abstract base class for capa response XML factories.
Subclasses override create_response_element and
create_input_element to produce XML of particular response types"""
__metaclass__ = ABCMeta
@abstractmethod
def create_response_element(self, **kwargs):
""" Subclasses override to return an etree element
representing the capa response XML
(e.g. <numericalresponse>).
The tree should NOT contain any input elements
(such as <textline />) as these will be added later."""
return None
@abstractmethod
def create_input_element(self, **kwargs):
""" Subclasses override this to return an etree element
representing the capa input XML (such as <textline />)"""
return None
def build_xml(self, **kwargs):
""" Construct an XML string for a capa response
based on **kwargs.
**kwargs is a dictionary that will be passed
to create_response_element() and create_input_element().
See the subclasses below for other keyword arguments
you can specify.
For all response types, **kwargs can contain:
*question_text*: The text of the question to display,
wrapped in <p> tags.
*explanation_text*: The detailed explanation that will
be shown if the user answers incorrectly.
*script*: The embedded Python script (a string)
*num_responses*: The number of responses to create [DEFAULT: 1]
*num_inputs*: The number of input elements
to create [DEFAULT: 1]
Returns a string representation of the XML tree.
"""
# Retrieve keyword arguments
question_text = kwargs.get('question_text', '')
explanation_text = kwargs.get('explanation_text', '')
script = kwargs.get('script', None)
num_responses = kwargs.get('num_responses', 1)
num_inputs = kwargs.get('num_inputs', 1)
# The root is <problem>
root = etree.Element("problem")
# Add a script if there is one
if script:
script_element = etree.SubElement(root, "script")
script_element.set("type", "loncapa/python")
script_element.text = str(script)
# The problem has a child <p> with question text
question = etree.SubElement(root, "p")
question.text = question_text
# Add the response(s)
for __ in range(int(num_responses)):
response_element = self.create_response_element(**kwargs)
root.append(response_element)
# Add input elements
for __ in range(int(num_inputs)):
input_element = self.create_input_element(**kwargs)
if input_element is not None:
response_element.append(input_element)
# The problem has an explanation of the solution
if explanation_text:
explanation = etree.SubElement(root, "solution")
explanation_div = etree.SubElement(explanation, "div")
explanation_div.set("class", "detailed-solution")
explanation_div.text = explanation_text
return etree.tostring(root)
@staticmethod
def textline_input_xml(**kwargs):
""" Create a <textline/> XML element
Uses **kwargs:
*math_display*: If True, then includes a MathJax display of user input
*size*: An integer representing the width of the text line
"""
math_display = kwargs.get('math_display', False)
size = kwargs.get('size', None)
input_element = etree.Element('textline')
if math_display:
input_element.set('math', '1')
if size:
input_element.set('size', str(size))
return input_element
@staticmethod
def choicegroup_input_xml(**kwargs):
""" Create a <choicegroup> XML element
Uses **kwargs:
*choice_type*: Can be "checkbox", "radio", or "multiple"
*choices*: List of True/False values indicating whether
a particular choice is correct or not.
Users must choose *all* correct options in order
to be marked correct.
DEFAULT: [True]
*choice_names": List of strings identifying the choices.
If specified, you must ensure that
len(choice_names) == len(choices)
"""
# Names of group elements
group_element_names = {
'checkbox': 'checkboxgroup',
'radio': 'radiogroup',
'multiple': 'choicegroup'
}
# Retrieve **kwargs
choices = kwargs.get('choices', [True])
choice_type = kwargs.get('choice_type', 'multiple')
choice_names = kwargs.get('choice_names', [None] * len(choices))
# Create the <choicegroup>, <checkboxgroup>, or <radiogroup> element
assert choice_type in group_element_names
group_element = etree.Element(group_element_names[choice_type])
# Create the <choice> elements
for (correct_val, name) in zip(choices, choice_names):
choice_element = etree.SubElement(group_element, "choice")
choice_element.set("correct", "true" if correct_val else "false")
# Add a name identifying the choice, if one exists
# For simplicity, we use the same string as both the
# name attribute and the text of the element
if name:
choice_element.text = str(name)
choice_element.set("name", str(name))
return group_element
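# A minimal usage sketch (hypothetical values): a concrete factory is
# instantiated and build_xml() is called with response-specific kwargs, e.g.
#   xml = NumericalResponseXMLFactory().build_xml(question_text="What is 2+2?",
#                                                 answer="4", tolerance="5%")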
class NumericalResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <numericalresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <numericalresponse> XML element.
Uses **kwarg keys:
*answer*: The correct answer (e.g. "5")
*tolerance*: The tolerance within which a response
is considered correct. Can be a decimal (e.g. "0.01")
or percentage (e.g. "2%")
"""
answer = kwargs.get('answer', None)
tolerance = kwargs.get('tolerance', None)
response_element = etree.Element('numericalresponse')
if answer:
if isinstance(answer, float):
response_element.set('answer', repr(answer))
else:
response_element.set('answer', str(answer))
if tolerance:
responseparam_element = etree.SubElement(response_element, 'responseparam')
responseparam_element.set('type', 'tolerance')
responseparam_element.set('default', str(tolerance))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class CustomResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <customresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <customresponse> XML element.
Uses **kwargs:
*cfn*: the Python code to run. Can be inline code,
or the name of a function defined in earlier <script> tags.
Should have the form: cfn(expect, answer_given, student_answers)
where expect is a value (see below),
answer_given is a single value (for 1 input)
or a list of values (for multiple inputs),
and student_answers is a dict of answers by input ID.
*expect*: The value passed to the function cfn
*answer*: Inline script that calculates the answer
"""
# Retrieve **kwargs
cfn = kwargs.get('cfn', None)
expect = kwargs.get('expect', None)
answer = kwargs.get('answer', None)
options = kwargs.get('options', None)
cfn_extra_args = kwargs.get('cfn_extra_args', None)
# Create the response element
response_element = etree.Element("customresponse")
if cfn:
response_element.set('cfn', str(cfn))
if expect:
response_element.set('expect', str(expect))
if answer:
answer_element = etree.SubElement(response_element, "answer")
answer_element.text = str(answer)
if options:
response_element.set('options', str(options))
if cfn_extra_args:
response_element.set('cfn_extra_args', str(cfn_extra_args))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class SchematicResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <schematicresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create the <schematicresponse> XML element.
Uses *kwargs*:
*answer*: The Python script used to evaluate the answer.
"""
answer_script = kwargs.get('answer', None)
# Create the <schematicresponse> element
response_element = etree.Element("schematicresponse")
# Insert the <answer> script if one is provided
if answer_script:
answer_element = etree.SubElement(response_element, "answer")
answer_element.set("type", "loncapa/python")
answer_element.text = str(answer_script)
return response_element
def create_input_element(self, **kwargs):
""" Create the <schematic> XML element.
Although <schematic> can have several attributes,
(*height*, *width*, *parts*, *analyses*, *submit_analysis*, and *initial_value*),
none of them are used in the capa module.
For testing, we create a bare-bones version of <schematic>."""
return etree.Element("schematic")
class CodeResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <coderesponse> XML trees """
def build_xml(self, **kwargs):
# Since we are providing an <answer> tag,
# we should override the default behavior
# of including a <solution> tag as well
kwargs['explanation_text'] = None
return super(CodeResponseXMLFactory, self).build_xml(**kwargs)
def create_response_element(self, **kwargs):
"""
Create a <coderesponse> XML element.
Uses **kwargs:
*initial_display*: The code that initially appears in the textbox
[DEFAULT: "Enter code here"]
*answer_display*: The answer to display to the student
[DEFAULT: "This is the correct answer!"]
*grader_payload*: A JSON-encoded string sent to the grader
[DEFAULT: empty dict string]
*allowed_files*: A space-separated string of file names.
[DEFAULT: None]
*required_files*: A space-separated string of file names.
[DEFAULT: None]
"""
# Get **kwargs
initial_display = kwargs.get("initial_display", "Enter code here")
answer_display = kwargs.get("answer_display", "This is the correct answer!")
grader_payload = kwargs.get("grader_payload", '{}')
allowed_files = kwargs.get("allowed_files", None)
required_files = kwargs.get("required_files", None)
# Create the <coderesponse> element
response_element = etree.Element("coderesponse")
# If files are involved, create the <filesubmission> element.
has_files = allowed_files or required_files
if has_files:
filesubmission_element = etree.SubElement(response_element, "filesubmission")
if allowed_files:
filesubmission_element.set("allowed_files", allowed_files)
if required_files:
filesubmission_element.set("required_files", required_files)
# Create the <codeparam> element.
codeparam_element = etree.SubElement(response_element, "codeparam")
# Set the initial display text
initial_element = etree.SubElement(codeparam_element, "initial_display")
initial_element.text = str(initial_display)
# Set the answer display text
answer_element = etree.SubElement(codeparam_element, "answer_display")
answer_element.text = str(answer_display)
# Set the grader payload string
grader_element = etree.SubElement(codeparam_element, "grader_payload")
grader_element.text = str(grader_payload)
# Create the input within the response
if not has_files:
input_element = etree.SubElement(response_element, "textbox")
input_element.set("mode", "python")
return response_element
def create_input_element(self, **kwargs):
# Since we create this in create_response_element(),
# return None here
return None
class ChoiceResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <choiceresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <choiceresponse> element """
return etree.Element("choiceresponse")
def create_input_element(self, **kwargs):
""" Create a <checkboxgroup> element."""
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class FormulaResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <formularesponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <formularesponse> element.
*sample_dict*: A dictionary of the form:
{ VARIABLE_NAME: (MIN, MAX), ....}
This specifies the range within which
to numerically sample each variable to check
student answers.
[REQUIRED]
*num_samples*: The number of times to sample the student's answer
to numerically compare it to the correct answer.
*tolerance*: The tolerance within which answers will be accepted
[DEFAULT: 0.01]
*answer*: The answer to the problem. Can be a formula string
or a Python variable defined in a script
(e.g. "$calculated_answer" for a Python variable
called calculated_answer)
[REQUIRED]
*hints*: List of (hint_prompt, hint_name, hint_text) tuples
Where *hint_prompt* is the formula for which we show the hint,
*hint_name* is an internal identifier for the hint,
and *hint_text* is the text we show for the hint.
"""
# Retrieve kwargs
sample_dict = kwargs.get("sample_dict", None)
num_samples = kwargs.get("num_samples", None)
tolerance = kwargs.get("tolerance", 0.01)
answer = kwargs.get("answer", None)
hint_list = kwargs.get("hints", None)
assert answer
assert sample_dict and num_samples
# Create the <formularesponse> element
response_element = etree.Element("formularesponse")
# Set the sample information
sample_str = self._sample_str(sample_dict, num_samples, tolerance)
response_element.set("samples", sample_str)
# Set the tolerance
responseparam_element = etree.SubElement(response_element, "responseparam")
responseparam_element.set("type", "tolerance")
responseparam_element.set("default", str(tolerance))
# Set the answer
response_element.set("answer", str(answer))
# Include hints, if specified
if hint_list:
hintgroup_element = etree.SubElement(response_element, "hintgroup")
for (hint_prompt, hint_name, hint_text) in hint_list:
# For each hint, create a <formulahint> element
formulahint_element = etree.SubElement(hintgroup_element, "formulahint")
# We could sample a different range, but for simplicity,
# we use the same sample string for the hints
# that we used previously.
formulahint_element.set("samples", sample_str)
formulahint_element.set("answer", str(hint_prompt))
formulahint_element.set("name", str(hint_name))
# For each hint, create a <hintpart> element
# corresponding to the <formulahint>
hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
hintpart_element.set("on", str(hint_name))
text_element = etree.SubElement(hintpart_element, "text")
text_element.text = str(hint_text)
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
def _sample_str(self, sample_dict, num_samples, tolerance):
# Loncapa uses a special format for sample strings:
# "x,y,z@4,5,3:10,12,8#4" means plug in values for (x,y,z)
# from within the box defined by points (4,5,3) and (10,12,8)
# The "#4" means to repeat 4 times.
variables = [str(v) for v in sample_dict.keys()]
low_range_vals = [str(f[0]) for f in sample_dict.values()]
high_range_vals = [str(f[1]) for f in sample_dict.values()]
sample_str = (
",".join(sample_dict.keys()) + "@" +
",".join(low_range_vals) + ":" +
",".join(high_range_vals) +
"#" + str(num_samples)
)
return sample_str
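# For example (illustrative values), sample_dict={"x": (1, 10)} with
# num_samples=5 produces the sample string "x@1:10#5".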
class ImageResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <imageresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <imageresponse> element."""
return etree.Element("imageresponse")
def create_input_element(self, **kwargs):
""" Create the <imageinput> element.
Uses **kwargs:
*src*: URL for the image file [DEFAULT: "/static/image.jpg"]
*width*: Width of the image [DEFAULT: 100]
*height*: Height of the image [DEFAULT: 100]
*rectangle*: String representing the rectangles the user should select.
Take the form "(x1,y1)-(x2,y2)", where the two (x,y)
tuples define the corners of the rectangle.
Can include multiple rectangles separated by a semicolon, e.g.
"(490,11)-(556,98);(242,202)-(296,276)"
*regions*: String representing the regions a user can select
Take the form "[ [[x1,y1], [x2,y2], [x3,y3]],
[[x1,y1], [x2,y2], [x3,y3]] ]"
(Defines two regions, each with 3 points)
REQUIRED: Either *rectangle* or *region* (or both)
"""
# Get the **kwargs
src = kwargs.get("src", "/static/image.jpg")
width = kwargs.get("width", 100)
height = kwargs.get("height", 100)
rectangle = kwargs.get('rectangle', None)
regions = kwargs.get('regions', None)
assert rectangle or regions
# Create the <imageinput> element
input_element = etree.Element("imageinput")
input_element.set("src", str(src))
input_element.set("width", str(width))
input_element.set("height", str(height))
if rectangle:
input_element.set("rectangle", rectangle)
if regions:
input_element.set("regions", regions)
return input_element
class JavascriptResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <javascriptresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <javascriptresponse> element.
Uses **kwargs:
*generator_src*: Name of the JS file to generate the problem.
*grader_src*: Name of the JS file to grade the problem.
*display_class*: Name of the class used to display the problem
*display_src*: Name of the JS file used to display the problem
*param_dict*: Dictionary of parameters to pass to the JS
"""
# Get **kwargs
generator_src = kwargs.get("generator_src", None)
grader_src = kwargs.get("grader_src", None)
display_class = kwargs.get("display_class", None)
display_src = kwargs.get("display_src", None)
param_dict = kwargs.get("param_dict", {})
# Both display_src and display_class given,
# or neither given
assert((display_src and display_class) or
(not display_src and not display_class))
# Create the <javascriptresponse> element
response_element = etree.Element("javascriptresponse")
if generator_src:
generator_element = etree.SubElement(response_element, "generator")
generator_element.set("src", str(generator_src))
if grader_src:
grader_element = etree.SubElement(response_element, "grader")
grader_element.set("src", str(grader_src))
if display_class and display_src:
display_element = etree.SubElement(response_element, "display")
display_element.set("class", str(display_class))
display_element.set("src", str(display_src))
for (param_name, param_val) in param_dict.items():
responseparam_element = etree.SubElement(response_element, "responseparam")
responseparam_element.set("name", str(param_name))
responseparam_element.set("value", str(param_val))
return response_element
def create_input_element(self, **kwargs):
""" Create the <javascriptinput> element """
return etree.Element("javascriptinput")
class MultipleChoiceResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <multiplechoiceresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <multiplechoiceresponse> element"""
return etree.Element('multiplechoiceresponse')
def create_input_element(self, **kwargs):
""" Create the <choicegroup> element"""
kwargs['choice_type'] = 'multiple'
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class TrueFalseResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <truefalseresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <truefalseresponse> element"""
return etree.Element('truefalseresponse')
def create_input_element(self, **kwargs):
""" Create the <choicegroup> element"""
kwargs['choice_type'] = 'multiple'
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class OptionResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <optionresponse> XML"""
def create_response_element(self, **kwargs):
""" Create the <optionresponse> element"""
return etree.Element("optionresponse")
def create_input_element(self, **kwargs):
""" Create the <optioninput> element.
Uses **kwargs:
*options*: a list of possible options the user can choose from [REQUIRED]
You must specify at least 2 options.
*correct_option*: the correct choice from the list of options [REQUIRED]
"""
options_list = kwargs.get('options', None)
correct_option = kwargs.get('correct_option', None)
assert options_list and correct_option
assert len(options_list) > 1
assert correct_option in options_list
# Create the <optioninput> element
optioninput_element = etree.Element("optioninput")
# Set the "options" attribute
# Format: "('first','second','third')"
options_attr_string = u",".join([u"'{}'".format(o) for o in options_list])
options_attr_string = u"({})".format(options_attr_string)
optioninput_element.set('options', options_attr_string)
# Set the "correct" attribute
optioninput_element.set('correct', str(correct_option))
return optioninput_element
class StringResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <stringresponse> XML """
def create_response_element(self, **kwargs):
""" Create a <stringresponse> XML element.
Uses **kwargs:
*answer*: The correct answer (a string) [REQUIRED]
*case_sensitive*: Whether the response is case-sensitive (True/False)
[DEFAULT: True]
*hints*: List of (hint_prompt, hint_name, hint_text) tuples
Where *hint_prompt* is the string for which we show the hint,
*hint_name* is an internal identifier for the hint,
and *hint_text* is the text we show for the hint.
*hintfn*: The name of a function in the script to use for hints.
*regexp*: Whether the response is regexp
*additional_answers*: list of additional answers.
*non_attribute_answers*: list of additional answers to be coded in the
non-attribute format
"""
# Retrieve the **kwargs
answer = kwargs.get("answer", None)
case_sensitive = kwargs.get("case_sensitive", None)
hint_list = kwargs.get('hints', None)
hint_fn = kwargs.get('hintfn', None)
regexp = kwargs.get('regexp', None)
additional_answers = kwargs.get('additional_answers', [])
non_attribute_answers = kwargs.get('non_attribute_answers', [])
assert answer
# Create the <stringresponse> element
response_element = etree.Element("stringresponse")
# Set the answer attribute
response_element.set("answer", unicode(answer))
# Set the case sensitivity and regexp:
type_value = ''
if case_sensitive is not None:
type_value += "cs" if case_sensitive else "ci"
type_value += ' regexp' if regexp else ''
if type_value:
response_element.set("type", type_value.strip())
# Add the hints if specified
if hint_list or hint_fn:
hintgroup_element = etree.SubElement(response_element, "hintgroup")
if hint_list:
assert not hint_fn
for (hint_prompt, hint_name, hint_text) in hint_list:
stringhint_element = etree.SubElement(hintgroup_element, "stringhint")
stringhint_element.set("answer", str(hint_prompt))
stringhint_element.set("name", str(hint_name))
hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
hintpart_element.set("on", str(hint_name))
hint_text_element = etree.SubElement(hintpart_element, "text")
hint_text_element.text = str(hint_text)
if hint_fn:
assert not hint_list
hintgroup_element.set("hintfn", hint_fn)
for additional_answer in additional_answers:
additional_node = etree.SubElement(response_element, "additional_answer") # pylint: disable=no-member
additional_node.set("answer", additional_answer)
for answer in non_attribute_answers:
additional_node = etree.SubElement(response_element, "additional_answer") # pylint: disable=no-member
additional_node.text = answer
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
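# A minimal usage sketch for StringResponseXMLFactory (illustrative; same
# hedged build_xml assumption as above):
#
#   factory = StringResponseXMLFactory()
#   xml = factory.build_xml(
#       answer='Michigan',
#       case_sensitive=False,    # emits type="ci"
#       regexp=True,             # appends " regexp", giving type="ci regexp"
#       hints=[('Wisconsin', 'wisc', 'The state capital of Wisconsin is Madison.')],
#       additional_answers=['MI'],
#   )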
class AnnotationResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <annotationresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <annotationresponse> element """
return etree.Element("annotationresponse")
def create_input_element(self, **kwargs):
""" Create a <annotationinput> element."""
input_element = etree.Element("annotationinput")
text_children = [
{'tag': 'title', 'text': kwargs.get('title', 'super cool annotation')},
{'tag': 'text', 'text': kwargs.get('text', 'texty text')},
{'tag': 'comment', 'text': kwargs.get('comment', 'blah blah erudite comment blah blah')},
{'tag': 'comment_prompt', 'text': kwargs.get('comment_prompt', 'type a commentary below')},
{'tag': 'tag_prompt', 'text': kwargs.get('tag_prompt', 'select one tag')}
]
for child in text_children:
etree.SubElement(input_element, child['tag']).text = child['text']
default_options = [('green', 'correct'), ('eggs', 'incorrect'), ('ham', 'partially-correct')]
options = kwargs.get('options', default_options)
options_element = etree.SubElement(input_element, 'options')
for (description, correctness) in options:
option_element = etree.SubElement(options_element, 'option', {'choice': correctness})
option_element.text = description
return input_element
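# With the defaults above, the generated <annotationinput> has this shape
# (illustrative rendering):
#
#   <annotationinput>
#     <title>super cool annotation</title>
#     <text>texty text</text>
#     <comment>blah blah erudite comment blah blah</comment>
#     <comment_prompt>type a commentary below</comment_prompt>
#     <tag_prompt>select one tag</tag_prompt>
#     <options>
#       <option choice="correct">green</option>
#       <option choice="incorrect">eggs</option>
#       <option choice="partially-correct">ham</option>
#     </options>
#   </annotationinput>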
class SymbolicResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <symbolicresponse> xml """
def create_response_element(self, **kwargs):
""" Build the <symbolicresponse> XML element.
Uses **kwargs:
*expect*: The correct answer (a sympy string)
*options*: list of option strings to pass to symmath_check
(e.g. 'matrix', 'qbit', 'imaginary', 'numerical')"""
# Retrieve **kwargs
expect = kwargs.get('expect', '')
options = kwargs.get('options', [])
# Symmath check expects a string of options
options_str = ",".join(options)
# Construct the <symbolicresponse> element
response_element = etree.Element('symbolicresponse')
if expect:
response_element.set('expect', str(expect))
if options_str:
response_element.set('options', str(options_str))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class ChoiceTextResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <choicetextresponse> xml """
def create_response_element(self, **kwargs):
""" Create a <choicetextresponse> element """
return etree.Element("choicetextresponse")
def create_input_element(self, **kwargs):
""" Create a <checkboxgroup> element.
choices can be specified in the following format:
[("true", [{"answer": "5", "tolerance": 0}]),
("false", [{"answer": "5", "tolerance": 0}])
]
This indicates that the first checkbox/radio is correct and contains
a numtolerance_input with an answer of 5 and a tolerance of 0, and
that the second choice is an incorrect radiobutton or checkbox, also
with a numtolerance_input.
"""
choices = kwargs.get('choices', [("true", {})])
choice_inputs = []
# Ensure that `choices` is an ordered collection of (correctness,
# answers) pairs; a single bare pair may have been passed, in which
# case its first element is not itself a list or tuple, so wrap it.
if not isinstance(choices[0], (list, tuple)):
choices = [choices]
for choice in choices:
correctness, answers = choice
numtolerance_inputs = []
# If the current `choice` contains any {"answer": number}
# params, turn those into numtolerance_inputs
if answers:
# `answers` will be a list or tuple of answers or a single
# answer, representing the answers for numtolerance_inputs
# inside of this specific choice.
# Make sure that `answers` is an ordered collection for
# convenience.
if not isinstance(answers, (list, tuple)):
answers = [answers]
numtolerance_inputs = [
self._create_numtolerance_input_element(answer)
for answer in answers
]
choice_inputs.append(
self._create_choice_element(
correctness=correctness,
inputs=numtolerance_inputs
)
)
# Default type is 'radiotextgroup'
input_type = kwargs.get('type', 'radiotextgroup')
input_element = etree.Element(input_type)
for ind, choice in enumerate(choice_inputs):
# Give each choice text equal to its position (0, 1, 2, ...)
choice.text = "choice_{0}".format(ind)
input_element.append(choice)
return input_element
def _create_choice_element(self, **kwargs):
"""
Creates a choice element for a choicetextresponse problem.
Defaults to a correct choice with no numtolerance_input
"""
text = kwargs.get('text', '')
correct = kwargs.get('correctness', "true")
inputs = kwargs.get('inputs', [])
choice_element = etree.Element("choice")
choice_element.set("correct", correct)
choice_element.text = text
for inp in inputs:
# Add all of the inputs as children of this choice
choice_element.append(inp)
return choice_element
def _create_numtolerance_input_element(self, params):
"""
Creates a <numtolerance_input/> or <decoy_input/> element with
optionally specified tolerance and answer.
"""
answer = params['answer'] if 'answer' in params else None
# If no answer is specified, then create a <decoy_input/>
# otherwise create a <numtolerance_input/> and set its tolerance
# and answer attributes.
if answer:
text_input = etree.Element("numtolerance_input")
text_input.set('answer', answer)
# If a tolerance was specified, use it; otherwise
# set the tolerance to "0"
text_input.set(
'tolerance',
params['tolerance'] if 'tolerance' in params else "0"
)
else:
text_input = etree.Element("decoy_input")
return text_input
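# A minimal usage sketch for ChoiceTextResponseXMLFactory, using the choices
# format documented in create_input_element above (illustrative):
#
#   factory = ChoiceTextResponseXMLFactory()
#   group = factory.create_input_element(
#       type='checkboxtextgroup',
#       choices=[('true', [{'answer': '5', 'tolerance': '1%'}]),
#                ('false', [{}])],
#   )
#
# The first choice becomes <choice correct="true"> wrapping a
# <numtolerance_input answer="5" tolerance="1%"/>; the second, whose params
# lack an answer, wraps a <decoy_input/>.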
| agpl-3.0 |
bosstb/HaberPush | youtube_dl/extractor/eitb.py | 71 | 3278 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
parse_iso8601,
sanitized_Request,
)
class EitbIE(InfoExtractor):
IE_NAME = 'eitb.tv'
_VALID_URL = r'https?://(?:www\.)?eitb\.tv/(?:eu/bideoa|es/video)/[^/]+/\d+/(?P<id>\d+)'
_TEST = {
'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/4104995148001/4090227752001/lasa-y-zabala-30-anos/',
'md5': 'edf4436247185adee3ea18ce64c47998',
'info_dict': {
'id': '4090227752001',
'ext': 'mp4',
'title': '60 minutos (Lasa y Zabala, 30 años)',
'description': 'Programa de reportajes de actualidad.',
'duration': 3996.76,
'timestamp': 1381789200,
'upload_date': '20131014',
'tags': list,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'http://mam.eitb.eus/mam/REST/ServiceMultiweb/Video/MULTIWEBTV/%s/' % video_id,
video_id, 'Downloading video JSON')
media = video['web_media'][0]
formats = []
for rendition in media['RENDITIONS']:
video_url = rendition.get('PMD_URL')
if not video_url:
continue
tbr = float_or_none(rendition.get('ENCODING_RATE'), 1000)
format_id = 'http'
if tbr:
format_id += '-%d' % int(tbr)
formats.append({
'url': rendition['PMD_URL'],
'format_id': format_id,
'width': int_or_none(rendition.get('FRAME_WIDTH')),
'height': int_or_none(rendition.get('FRAME_HEIGHT')),
'tbr': tbr,
})
hls_url = media.get('HLS_SURL')
if hls_url:
request = sanitized_Request(
'http://mam.eitb.eus/mam/REST/ServiceMultiweb/DomainRestrictedSecurity/TokenAuth/',
headers={'Referer': url})
token_data = self._download_json(
request, video_id, 'Downloading auth token', fatal=False)
if token_data:
token = token_data.get('token')
if token:
formats.extend(self._extract_m3u8_formats(
'%s?hdnts=%s' % (hls_url, token), video_id, m3u8_id='hls', fatal=False))
hds_url = media.get('HDS_SURL')
if hds_url:
formats.extend(self._extract_f4m_formats(
'%s?hdcore=3.7.0' % hds_url.replace('euskalsvod', 'euskalvod'),
video_id, f4m_id='hds', fatal=False))
self._sort_formats(formats)
return {
'id': video_id,
'title': media.get('NAME_ES') or media.get('name') or media['NAME_EU'],
'description': media.get('SHORT_DESC_ES') or video.get('desc_group') or media.get('SHORT_DESC_EU'),
'thumbnail': media.get('STILL_URL') or media.get('THUMBNAIL_URL'),
'duration': float_or_none(media.get('LENGTH'), 1000),
'timestamp': parse_iso8601(media.get('BROADCST_DATE'), ' '),
'tags': media.get('TAGS'),
'formats': formats,
}
| mit |
jessrosenfield/pants | src/python/pants/backend/codegen/targets/jaxb_library.py | 15 | 1613 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
class JaxbLibrary(JvmTarget):
"""Generates a stub Java library from jaxb xsd files."""
def __init__(self, payload=None, package=None, language='java', **kwargs):
"""
:param package: java package (com.company.package) in which to generate the output java files.
If unspecified, Pants guesses it from the file path leading to the schema
(xsd) file. This guess is accurate only if the .xsd file is in a path like
``.../com/company/package/schema.xsd``. Pants looks for packages that start with 'com', 'org',
or 'net'.
:param string language: only 'java' is supported. Default: 'java'
"""
payload = payload or Payload()
payload.add_fields({
'package': PrimitiveField(package),
'jaxb_language': PrimitiveField(language),
})
super(JaxbLibrary, self).__init__(payload=payload, **kwargs)
self.add_labels('codegen')
self.add_labels('jaxb')
if language != 'java':
raise ValueError('Language "{lang}" not supported for {class_type}'
.format(lang=language, class_type=type(self).__name__))
@property
def package(self):
return self.payload.package
| apache-2.0 |
juvoinc/airflow | airflow/ti_deps/dep_context.py | 12 | 4987 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.ti_deps.deps.dag_ti_slots_available_dep import DagTISlotsAvailableDep
from airflow.ti_deps.deps.dag_unpaused_dep import DagUnpausedDep
from airflow.ti_deps.deps.dagrun_exists_dep import DagrunRunningDep
from airflow.ti_deps.deps.exec_date_after_start_date_dep import ExecDateAfterStartDateDep
from airflow.ti_deps.deps.not_running_dep import NotRunningDep
from airflow.ti_deps.deps.not_skipped_dep import NotSkippedDep
from airflow.ti_deps.deps.pool_has_space_dep import PoolHasSpaceDep
from airflow.ti_deps.deps.runnable_exec_date_dep import RunnableExecDateDep
from airflow.ti_deps.deps.valid_state_dep import ValidStateDep
from airflow.utils.state import State
class DepContext(object):
"""
A base class for contexts that specifies which dependencies should be evaluated in
the context for a task instance to satisfy the requirements of the context. Also
stores state related to the context that can be used by dependency classes.
For example there could be a SomeRunContext that subclasses this class which has
dependencies for:
- Making sure there are slots available on the infrastructure to run the task instance
- A task-instance's task-specific dependencies are met (e.g. the previous task
instance completed successfully)
- ...
:param deps: The context-specific dependencies that need to be evaluated for a
task instance to run in this execution context.
:type deps: set(BaseTIDep)
:param flag_upstream_failed: This is a hack to trigger creation of the
upstream_failed state while checking whether the task instance is runnable.
It was the shortest path to adding the feature. This is bad since this class
should be pure (no side effects).
:type flag_upstream_failed: boolean
:param ignore_all_deps: Whether or not the context should ignore all ignorable
dependencies. Overrides the other ignore_* parameters.
:type ignore_all_deps: boolean
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs (e.g. for
Backfills)
:type ignore_depends_on_past: boolean
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past and
trigger rule
:type ignore_task_deps: boolean
:param ignore_ti_state: Ignore the task instance's previous failure/success
:type ignore_ti_state: boolean
"""
def __init__(
self,
deps=None,
flag_upstream_failed=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False):
self.deps = deps or set()
self.flag_upstream_failed = flag_upstream_failed
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
# In order to be able to get queued a task must have one of these states
QUEUEABLE_STATES = {
State.FAILED,
State.NONE,
State.QUEUED,
State.SCHEDULED,
State.SKIPPED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
}
# The minimum execution context for task instances to be executed.
MIN_EXEC_DEPS = {
NotRunningDep(),
NotSkippedDep(),
RunnableExecDateDep(),
}
# Context to get the dependencies that need to be met in order for a task instance to
# be backfilled.
QUEUE_DEPS = MIN_EXEC_DEPS | {
ValidStateDep(QUEUEABLE_STATES)
}
# Dependencies that need to be met for a given task instance to be able to get run by an
# executor. This class just extends QueueContext by adding dependencies for resources.
RUN_DEPS = QUEUE_DEPS | {
DagTISlotsAvailableDep(),
PoolHasSpaceDep(),
}
# TODO(aoen): SCHEDULER_DEPS is not coupled to actual execution in any way and
# could easily be modified or removed from the scheduler causing this dependency to become
# outdated and incorrect. This coupling should be created (e.g. via a dag_deps analog of
# ti_deps that will be used in the scheduler code) to ensure that the logic here is
# equivalent to the logic in the scheduler.
# Dependencies that need to be met for a given task instance to get scheduled by the
# scheduler, then queued by the scheduler, then run by an executor.
SCHEDULER_DEPS = RUN_DEPS | {
DagrunRunningDep(),
DagUnpausedDep(),
ExecDateAfterStartDateDep(),
}
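# Illustrative sketch of how these contexts are consumed (assuming Airflow's
# TaskInstance.are_dependencies_met(dep_context=..., session=...) API):
#
#   dep_context = DepContext(deps=RUN_DEPS, ignore_ti_state=True)
#   if ti.are_dependencies_met(dep_context=dep_context, session=session):
#       ...  # the task instance may be handed to an executor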
| apache-2.0 |
40223231/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/SDL.py | 603 | 1813 | from browser import document
SDL_INIT_VIDEO=0
SDL_GL_DOUBLEBUFFER=1
SDL_GL_DEPTH_SIZE=2
SDL_DOUBLEBUF=3
SDL_ANYFORMAT=4
SDL_ACTIVEEVENT=5
SDL_ALLEVENTS=5
SDL_KEYDOWN=6
SDL_KEYUP=7
SDL_MOUSEMOTION=8
SDL_MOUSEBUTTONDOWN=9
SDL_MOUSEBUTTONUP=10
SDL_JOYAXISMOTION=11
SDL_JOYBALLMOTION=12
SDL_JOYHATMOTION=13
SDL_JOYBUTTONUP=14
SDL_JOYBUTTONDOWN=15
SDL_QUIT=16
SDL_SYSWMEVENT=17
SDL_VIDEORESIZE=18
SDL_VIDEOEXPOSE=19
SDL_NOEVENT=20
SDL_GETEVENT=21
SDL_OPENGL=False
def SDL_WasInit(var):
return True
_attrs={}
_wm={}
def SDL_PeepEvents(num, event, mask):
pass
def SDL_GL_SetAttribute(variable, value):
_attrs[variable]=value
def SDL_GL_GetAttribute(variable):
return _attrs.getvalue(variable, None)
def SDL_GL_SetVideoMode(width, height, depth, flags):
pass
def SDL_WM_SetCaption(title, icontitle):
_wm['title']=title
_wm['icontitle']=icontitle
def SDL_PumpEvents():
pass
def SDL_SetVideoMode(width, height, depth, flags):
pass
def SDL_SetColorKey(surface, key, value):
pass
def SDL_WM_GetCaption():
return _wm.get('title', ''), _wm.get('icontitle', '')
def SDL_UpdateRect(screen, x1, y1, x2, y2):
screen.canvas.style.width=screen.canvas.style.width
def SDL_UpdateRects(screen, rects):
for _rect in rects:
SDL_UpdateRect(screen, _rect)
def SDL_GetVideoSurface():
return _Screen
def SDL_GetVideoInfo():
return
def SDL_VideoModeOK(width, height, depth, flags):
pass
def SDL_SetPalette(surface, sdl_var, colors, flag):
pass
class Screen:
def __init__(self):
self.flags=0
@property
def canvas(self):
return document.get(selector='canvas')[0]
_Screen=Screen()
class SDL_Rect:
def __init__(self, x, y, w, h):
self.x=x
self.y=y
self.w=w
self.h=h
def SDL_Flip(screen):
pass
| gpl-3.0 |
detiber/lib_openshift | lib_openshift/models/v1_git_build_source.py | 2 | 5508 | # coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1GitBuildSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'uri': 'str',
'ref': 'str',
'http_proxy': 'str',
'https_proxy': 'str'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'uri': 'uri',
'ref': 'ref',
'http_proxy': 'httpProxy',
'https_proxy': 'httpsProxy'
}
def __init__(self, uri=None, ref=None, http_proxy=None, https_proxy=None):
"""
V1GitBuildSource - a model defined in Swagger
"""
self._uri = uri
self._ref = ref
self._http_proxy = http_proxy
self._https_proxy = https_proxy
@property
def uri(self):
"""
Gets the uri of this V1GitBuildSource.
URI points to the source that will be built. The structure of the source will depend on the type of build to run
:return: The uri of this V1GitBuildSource.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this V1GitBuildSource.
URI points to the source that will be built. The structure of the source will depend on the type of build to run
:param uri: The uri of this V1GitBuildSource.
:type: str
"""
self._uri = uri
@property
def ref(self):
"""
Gets the ref of this V1GitBuildSource.
Ref is the branch/tag/ref to build.
:return: The ref of this V1GitBuildSource.
:rtype: str
"""
return self._ref
@ref.setter
def ref(self, ref):
"""
Sets the ref of this V1GitBuildSource.
Ref is the branch/tag/ref to build.
:param ref: The ref of this V1GitBuildSource.
:type: str
"""
self._ref = ref
@property
def http_proxy(self):
"""
Gets the http_proxy of this V1GitBuildSource.
HTTPProxy is a proxy used to reach the git repository over http
:return: The http_proxy of this V1GitBuildSource.
:rtype: str
"""
return self._http_proxy
@http_proxy.setter
def http_proxy(self, http_proxy):
"""
Sets the http_proxy of this V1GitBuildSource.
HTTPProxy is a proxy used to reach the git repository over http
:param http_proxy: The http_proxy of this V1GitBuildSource.
:type: str
"""
self._http_proxy = http_proxy
@property
def https_proxy(self):
"""
Gets the https_proxy of this V1GitBuildSource.
HTTPSProxy is a proxy used to reach the git repository over https
:return: The https_proxy of this V1GitBuildSource.
:rtype: str
"""
return self._https_proxy
@https_proxy.setter
def https_proxy(self, https_proxy):
"""
Sets the https_proxy of this V1GitBuildSource.
HTTPSProxy is a proxy used to reach the git repository over https
:param https_proxy: The https_proxy of this V1GitBuildSource.
:type: str
"""
self._https_proxy = https_proxy
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1GitBuildSource.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 |
skyling/shadowsocks | shadowsocks/crypto/openssl.py | 1038 | 5414 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
__all__ = ['ciphers']
libcrypto = None
loaded = False
buf_size = 2048
def load_openssl():
global loaded, libcrypto, buf
libcrypto = util.find_library(('crypto', 'eay32'),
'EVP_get_cipherbyname',
'libcrypto')
if libcrypto is None:
raise Exception('libcrypto(OpenSSL) not found')
libcrypto.EVP_get_cipherbyname.restype = c_void_p
libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p
libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
c_char_p, c_char_p, c_int)
libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
c_char_p, c_int)
libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
libcrypto.OpenSSL_add_all_ciphers()
buf = create_string_buffer(buf_size)
loaded = True
def load_cipher(cipher_name):
func_name = 'EVP_' + cipher_name.replace('-', '_')
if bytes != str:
func_name = str(func_name, 'utf-8')
cipher = getattr(libcrypto, func_name, None)
if cipher:
cipher.restype = c_void_p
return cipher()
return None
class OpenSSLCrypto(object):
def __init__(self, cipher_name, key, iv, op):
self._ctx = None
if not loaded:
load_openssl()
cipher_name = common.to_bytes(cipher_name)
cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
if not cipher:
cipher = load_cipher(cipher_name)
if not cipher:
raise Exception('cipher %s not found in libcrypto' % cipher_name)
key_ptr = c_char_p(key)
iv_ptr = c_char_p(iv)
self._ctx = libcrypto.EVP_CIPHER_CTX_new()
if not self._ctx:
raise Exception('can not create cipher context')
r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
key_ptr, iv_ptr, c_int(op))
if not r:
self.clean()
raise Exception('can not initialize cipher context')
def update(self, data):
global buf_size, buf
cipher_out_len = c_long(0)
l = len(data)
if buf_size < l:
buf_size = l * 2
buf = create_string_buffer(buf_size)
libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
byref(cipher_out_len), c_char_p(data), l)
# buf is copied to a str object when we access buf.raw
return buf.raw[:cipher_out_len.value]
def __del__(self):
self.clean()
def clean(self):
if self._ctx:
libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
libcrypto.EVP_CIPHER_CTX_free(self._ctx)
ciphers = {
'aes-128-cfb': (16, 16, OpenSSLCrypto),
'aes-192-cfb': (24, 16, OpenSSLCrypto),
'aes-256-cfb': (32, 16, OpenSSLCrypto),
'aes-128-ofb': (16, 16, OpenSSLCrypto),
'aes-192-ofb': (24, 16, OpenSSLCrypto),
'aes-256-ofb': (32, 16, OpenSSLCrypto),
'aes-128-ctr': (16, 16, OpenSSLCrypto),
'aes-192-ctr': (24, 16, OpenSSLCrypto),
'aes-256-ctr': (32, 16, OpenSSLCrypto),
'aes-128-cfb8': (16, 16, OpenSSLCrypto),
'aes-192-cfb8': (24, 16, OpenSSLCrypto),
'aes-256-cfb8': (32, 16, OpenSSLCrypto),
'aes-128-cfb1': (16, 16, OpenSSLCrypto),
'aes-192-cfb1': (24, 16, OpenSSLCrypto),
'aes-256-cfb1': (32, 16, OpenSSLCrypto),
'bf-cfb': (16, 8, OpenSSLCrypto),
'camellia-128-cfb': (16, 16, OpenSSLCrypto),
'camellia-192-cfb': (24, 16, OpenSSLCrypto),
'camellia-256-cfb': (32, 16, OpenSSLCrypto),
'cast5-cfb': (16, 8, OpenSSLCrypto),
'des-cfb': (8, 8, OpenSSLCrypto),
'idea-cfb': (16, 8, OpenSSLCrypto),
'rc2-cfb': (16, 8, OpenSSLCrypto),
'rc4': (16, 0, OpenSSLCrypto),
'seed-cfb': (16, 16, OpenSSLCrypto),
}
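# Each ciphers entry maps a method name to (key_size, iv_size, crypto_class),
# with sizes in bytes. A minimal round trip (illustrative; the key/iv lengths
# must match the table entry for the chosen method):
#
#   cipher = OpenSSLCrypto('aes-256-cfb', b'k' * 32, b'i' * 16, 1)    # op=1: encrypt
#   decipher = OpenSSLCrypto('aes-256-cfb', b'k' * 32, b'i' * 16, 0)  # op=0: decrypt
#   assert decipher.update(cipher.update(b'hello')) == b'hello'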
def run_method(method):
cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
def test_aes_128_cfb():
run_method('aes-128-cfb')
def test_aes_256_cfb():
run_method('aes-256-cfb')
def test_aes_128_cfb8():
run_method('aes-128-cfb8')
def test_aes_256_ofb():
run_method('aes-256-ofb')
def test_aes_256_ctr():
run_method('aes-256-ctr')
def test_bf_cfb():
run_method('bf-cfb')
def test_rc4():
run_method('rc4')
if __name__ == '__main__':
test_aes_128_cfb()
| apache-2.0 |
dahlstrom-g/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_vars.py | 7 | 26282 | """ pydevd_vars deals with variables:
resolution/conversion to XML.
"""
import math
import pickle
from _pydev_bundle.pydev_imports import quote
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle.pydevd_constants import get_frame, get_current_thread_id, xrange, NUMPY_NUMERIC_TYPES, NUMPY_FLOATING_POINT_TYPES
from _pydevd_bundle.pydevd_custom_frames import get_custom_frame
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys # @Reimport
try:
from collections import OrderedDict
except:
OrderedDict = dict
from _pydev_imps._pydev_saved_modules import threading
import traceback
from _pydevd_bundle import pydevd_save_locals
from _pydev_bundle.pydev_imports import Exec, execfile
from _pydevd_bundle.pydevd_utils import VariableWithOffset
SENTINEL_VALUE = []
DEFAULT_DF_FORMAT = "s"
# ------------------------------------------------------------------------------------------------------ class for errors
class VariableError(RuntimeError): pass
class FrameNotFoundError(RuntimeError): pass
def _iter_frames(initialFrame):
'''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)'''
# cannot use yield
frames = []
while initialFrame is not None:
frames.append(initialFrame)
initialFrame = initialFrame.f_back
return frames
def dump_frames(thread_id):
sys.stdout.write('dumping frames\n')
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("find_frame: must execute on same thread")
curFrame = get_frame()
for frame in _iter_frames(curFrame):
sys.stdout.write('%s\n' % pickle.dumps(frame))
# ===============================================================================
# AdditionalFramesContainer
# ===============================================================================
class AdditionalFramesContainer:
lock = thread.allocate_lock()
additional_frames = {} # dict of dicts
def add_additional_frame_by_id(thread_id, frames_by_id):
AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id
addAdditionalFrameById = add_additional_frame_by_id # Backward compatibility
def remove_additional_frame_by_id(thread_id):
del AdditionalFramesContainer.additional_frames[thread_id]
removeAdditionalFrameById = remove_additional_frame_by_id # Backward compatibility
def has_additional_frames_by_id(thread_id):
return thread_id in AdditionalFramesContainer.additional_frames
def get_additional_frames_by_id(thread_id):
return AdditionalFramesContainer.additional_frames.get(thread_id)
def find_frame(thread_id, frame_id):
""" returns a frame on the thread that has a given frame_id """
try:
curr_thread_id = get_current_thread_id(threading.currentThread())
if thread_id != curr_thread_id:
try:
return get_custom_frame(thread_id, frame_id) # I.e.: thread_id could be a stackless frame id + thread_id.
except:
pass
raise VariableError("find_frame: must execute on same thread (%s != %s)" % (thread_id, curr_thread_id))
lookingFor = int(frame_id)
if AdditionalFramesContainer.additional_frames:
if thread_id in AdditionalFramesContainer.additional_frames:
frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor)
if frame is not None:
return frame
curFrame = get_frame()
if frame_id == "*":
return curFrame # any frame is specified with "*"
frameFound = None
for frame in _iter_frames(curFrame):
if lookingFor == id(frame):
frameFound = frame
del frame
break
del frame
# Important: python can hold a reference to the frame from the current context
# if an exception is raised, so, if we don't explicitly add those deletes
# we might have those variables living much more than we'd want to.
# I.e.: sys.exc_info holding reference to frame that raises exception (so, other places
# need to call sys.exc_clear())
del curFrame
if frameFound is None:
msgFrames = ''
i = 0
for frame in _iter_frames(get_frame()):
i += 1
msgFrames += str(id(frame))
if i % 5 == 0:
msgFrames += '\n'
else:
msgFrames += ' - '
# Note: commented this error message out (it may commonly happen
# if a message asking for a frame is issued while a thread is paused
# but the thread starts running before the message is actually
# handled).
# Leaving code to uncomment during tests.
# err_msg = '''find_frame: frame not found.
# Looking for thread_id:%s, frame_id:%s
# Current thread_id:%s, available frames:
# %s\n
# ''' % (thread_id, lookingFor, curr_thread_id, msgFrames)
#
# sys.stderr.write(err_msg)
return None
return frameFound
except:
import traceback
traceback.print_exc()
return None
def getVariable(thread_id, frame_id, scope, attrs):
"""
returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2).
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case).
"""
if scope == 'BY_ID':
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("getVariable: must execute on same thread")
try:
import gc
objects = gc.get_objects()
except:
pass # Not all python variants have it.
else:
frame_id = int(frame_id)
for var in objects:
if id(var) == frame_id:
if attrs is not None:
attrList = attrs.split('\t')
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
# If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
return None
frame = find_frame(thread_id, frame_id)
if frame is None:
return {}
if attrs is not None:
attrList = attrs.split('\t')
else:
attrList = []
for attr in attrList:
attr.replace("@_@TAB_CHAR@_@", '\t')
if scope == 'EXPRESSION':
for count in xrange(len(attrList)):
if count == 0:
# An Expression can be in any scope (globals/locals), therefore it needs to evaluated as an expression
var = evaluate_expression(thread_id, frame_id, attrList[count], False)
else:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, attrList[count])
else:
if scope == "GLOBAL":
var = frame.f_globals
del attrList[0] # globals are special, and they get a single dummy unused attribute
else:
# in a frame access both locals and globals as Python does
var = {}
var.update(frame.f_globals)
var.update(frame.f_locals)
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def get_offset(attrs):
"""
Extract offset from the given attributes.
:param attrs: The string of a compound variable fields split by tabs.
If an offset is given, it must go the first element.
:return: The value of offset if given or 0.
"""
offset = 0
if attrs is not None:
try:
offset = int(attrs.split('\t')[0])
except ValueError:
pass
return offset
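# Examples of the offset convention handled above (illustrative):
#
#   get_offset('300\tobj\tattr1\tattr2')  ->  300
#   get_offset('obj\tattr1\tattr2')       ->  0  (first field is not an int)
#   get_offset(None)                      ->  0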
def resolve_compound_variable_fields(thread_id, frame_id, scope, attrs):
"""
Resolve compound variable in debugger scopes by its name and attributes
:param thread_id: id of the variable's thread
:param frame_id: id of the variable's frame
:param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
:param attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:return: a dictionary of the variable's fields
:note: PyCharm supports progressive loading of large collections and uses the `attrs`
parameter to pass the offset, e.g. 300\t\\obj\tattr1\tattr2 should return
the value of attr2 starting from the 300th element. This hack makes it possible
to add support for progressive loading without extending the protocol.
"""
offset = get_offset(attrs)
orig_attrs, attrs = attrs, attrs.split('\t', 1)[1] if offset else attrs
var = getVariable(thread_id, frame_id, scope, attrs)
try:
_type, _typeName, resolver = get_type(var)
return _typeName, resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)
except:
sys.stderr.write('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s\n' % (
thread_id, frame_id, scope, orig_attrs,))
traceback.print_exc()
def resolve_var_object(var, attrs):
"""
Resolve variable's attribute
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a value of resolved variable's attribute
"""
if attrs is not None:
attr_list = attrs.split('\t')
else:
attr_list = []
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def resolve_compound_var_object_fields(var, attrs):
"""
Resolve compound variable by its object and attributes
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a dictionary of the variable's fields
"""
offset = get_offset(attrs)
attrs = attrs.split('\t', 1)[1] if offset else attrs
attr_list = attrs.split('\t')
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
try:
type, _typeName, resolver = get_type(var)
return resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)
except:
traceback.print_exc()
def custom_operation(thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):
"""
We'll execute the code_or_file and then search in the namespace the operation_fn_name to execute with the given var.
code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.
operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)
"""
expressionValue = getVariable(thread_id, frame_id, scope, attrs)
try:
namespace = {'__name__': '<custom_operation>'}
if style == "EXECFILE":
namespace['__file__'] = code_or_file
execfile(code_or_file, namespace, namespace)
else: # style == EXEC
namespace['__file__'] = '<customOperationCode>'
Exec(code_or_file, namespace, namespace)
return str(namespace[operation_fn_name](expressionValue))
except:
traceback.print_exc()
def eval_in_context(expression, globals, locals):
result = None
try:
result = eval(expression, globals, locals)
except Exception:
s = StringIO()
traceback.print_exc(file=s)
result = s.getvalue()
try:
try:
etype, value, tb = sys.exc_info()
result = value
finally:
etype = value = tb = None
except:
pass
result = ExceptionOnEvaluate(result)
# Ok, we have the initial error message, but let's see if we're dealing with a name mangling error...
try:
if '__' in expression:
# Try to handle '__' name mangling...
split = expression.split('.')
curr = locals.get(split[0])
for entry in split[1:]:
if entry.startswith('__') and not hasattr(curr, entry):
entry = '_%s%s' % (curr.__class__.__name__, entry)
curr = getattr(curr, entry)
result = curr
except:
pass
return result
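# Name-mangling example for the fallback above (illustrative):
#
#   class Foo(object):
#       def __init__(self):
#           self.__secret = 42   # stored as _Foo__secret
#
#   eval_in_context('foo.__secret', {}, {'foo': Foo()})
#   # eval raises AttributeError, so the loop retries getattr with
#   # '_Foo__secret' and returns 42.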
def evaluate_expression(thread_id, frame_id, expression, doExec):
'''returns the result of the evaluated expression
@param doExec: determines if we should do an exec or an eval
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
# Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
# (Names not resolved in generator expression in method)
# See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals
try:
expression = str(expression.replace('@LINE@', '\n'))
if doExec:
try:
# try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
# it will have whatever the user actually did)
compiled = compile(expression, '<string>', 'eval')
except:
Exec(expression, updated_globals, frame.f_locals)
pydevd_save_locals.save_locals(frame)
else:
result = eval(compiled, updated_globals, frame.f_locals)
if result is not None: # Only print if it's not None (as python does)
sys.stdout.write('%s\n' % (result,))
return
else:
return eval_in_context(expression, updated_globals, frame.f_locals)
finally:
# Should not be kept alive if an exception happens and this frame is kept in the stack.
del updated_globals
del frame
def change_attr_expression(thread_id, frame_id, attr, expression, dbg, value=SENTINEL_VALUE):
'''Changes some attribute in a given frame.
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
try:
expression = expression.replace('@LINE@', '\n')
if dbg.plugin and value is SENTINEL_VALUE:
result = dbg.plugin.change_variable(frame, attr, expression)
if result:
return result
if value is SENTINEL_VALUE:
# It is possible to have variables with names like '.0', ',,,foo', etc in scope by setting them with
# `sys._getframe().f_locals`. In particular, the '.0' variable name is used to denote the list iterator when we stop in
# list comprehension expressions. This variable evaluates to 0. by `eval`, which is not what we want and this is the main
# reason we have to check if the expression exists in the global and local scopes before trying to evaluate it.
value = frame.f_locals.get(expression) or frame.f_globals.get(expression) or eval(expression, frame.f_globals, frame.f_locals)
if attr[:7] == "Globals":
attr = attr[8:]
if attr in frame.f_globals:
frame.f_globals[attr] = value
return frame.f_globals[attr]
else:
if pydevd_save_locals.is_save_locals_available():
frame.f_locals[attr] = value
pydevd_save_locals.save_locals(frame)
return frame.f_locals[attr]
# default way (only works for changing it in the topmost frame)
result = value
Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
return result
except Exception:
traceback.print_exc()
MAXIMUM_ARRAY_SIZE = float('inf')
def array_to_xml(array, name, roffset, coffset, rows, cols, format):
array, xml, r, c, f = array_to_meta_xml(array, name, format)
format = '%' + f
if rows == -1 and cols == -1:
rows = r
cols = c
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE)
# there is no obvious rule for slicing (at least 5 choices)
if len(array) == 1 and (rows > 1 or cols > 1):
array = array[0]
if array.size > len(array):
array = array[roffset:, coffset:]
rows = min(rows, len(array))
cols = min(cols, len(array[0]))
if len(array) == 1:
array = array[0]
elif array.size == len(array):
if roffset == 0 and rows == 1:
array = array[coffset:]
cols = min(cols, len(array))
elif coffset == 0 and cols == 1:
array = array[roffset:]
rows = min(rows, len(array))
def get_value(row, col):
value = array
if rows == 1 or cols == 1:
if rows == 1 and cols == 1:
value = array[0]
else:
value = array[(col if rows == 1 else row)]
if "ndarray" in str(type(value)):
value = value[0]
else:
value = array[row][col]
return value
xml += array_data_to_xml(rows, cols, lambda r: (get_value(r, c) for c in range(cols)), format)
return xml
class ExceedingArrayDimensionsException(Exception):
pass
def array_to_meta_xml(array, name, format):
type = array.dtype.kind
slice = name
l = len(array.shape)
# initial load, compute slice
if format == '%':
if l > 2:
slice += '[0]' * (l - 2)
for r in range(l - 2):
array = array[0]
if type == 'f':
format = '.5f'
elif type == 'i' or type == 'u':
format = 'd'
else:
format = 's'
else:
format = format.replace('%', '')
l = len(array.shape)
reslice = ""
if l > 2:
raise ExceedingArrayDimensionsException()
elif l == 1:
# special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim
# http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
# explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
# we use kind of a hack - get information about memory from C_CONTIGUOUS
is_row = array.flags['C_CONTIGUOUS']
if is_row:
rows = 1
cols = len(array)
if cols < len(array):
reslice = '[0:%s]' % (cols)
array = array[0:cols]
else:
cols = 1
rows = len(array)
if rows < len(array):
reslice = '[0:%s]' % (rows)
array = array[0:rows]
elif l == 2:
rows = array.shape[-2]
cols = array.shape[-1]
if cols < array.shape[-1] or rows < array.shape[-2]:
reslice = '[0:%s, 0:%s]' % (rows, cols)
array = array[0:rows, 0:cols]
# avoid slice duplication
if not slice.endswith(reslice):
slice += reslice
bounds = (0, 0)
if type in NUMPY_NUMERIC_TYPES and array.size != 0:
bounds = (array.min(), array.max())
return array, slice_to_xml(slice, rows, cols, format, type, bounds), rows, cols, format
def get_column_formatter_by_type(initial_format, column_type):
if column_type in NUMPY_NUMERIC_TYPES and initial_format:
if column_type in NUMPY_FLOATING_POINT_TYPES and initial_format.strip() == DEFAULT_DF_FORMAT:
# use custom formatting for floats when default formatting is set
return array_default_format(column_type)
return initial_format
else:
return array_default_format(column_type)
def get_formatted_row_elements(row, iat, dim, cols, format, dtypes):
for c in range(cols):
val = iat[row, c] if dim > 1 else iat[row]
col_formatter = get_column_formatter_by_type(format, dtypes[c])
try:
yield ("%" + col_formatter) % (val,)
except TypeError:
yield ("%" + DEFAULT_DF_FORMAT) % (val,)
def array_default_format(type):
if type == 'f':
return '.5f'
elif type == 'i' or type == 'u':
return 'd'
else:
return 's'
def get_label(label):
return str(label) if not isinstance(label, tuple) else '/'.join(map(str, label))
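# get_label flattens MultiIndex-style tuple labels (illustrative):
#
#   get_label('price')         ->  'price'
#   get_label(('2024', 'Q1'))  ->  '2024/Q1'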
DATAFRAME_HEADER_LOAD_MAX_SIZE = 100
def dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):
"""
:type df: pandas.core.frame.DataFrame
:type name: str
:type coffset: int
:type roffset: int
:type rows: int
:type cols: int
:type format: str
"""
original_df = df
dim = len(df.axes)
num_rows = df.shape[0]
num_cols = df.shape[1] if dim > 1 else 1
format = format.replace('%', '')
if not format:
if num_rows > 0 and num_cols == 1: # series or data frame with one column
try:
kind = df.dtype.kind
except AttributeError:
try:
kind = df.dtypes[0].kind
except (IndexError, KeyError):
kind = 'O'
format = array_default_format(kind)
else:
format = array_default_format(DEFAULT_DF_FORMAT)
xml = slice_to_xml(name, num_rows, num_cols, format, "", (0, 0))
if (rows, cols) == (-1, -1):
rows, cols = num_rows, num_cols
elif (rows, cols) == (0, 0):
# return header only
r = min(num_rows, DATAFRAME_HEADER_LOAD_MAX_SIZE)
c = min(num_cols, DATAFRAME_HEADER_LOAD_MAX_SIZE)
xml += header_data_to_xml(r, c, [""] * num_cols, [(0, 0)] * num_cols, lambda x: DEFAULT_DF_FORMAT, original_df, dim)
return xml
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE, num_cols)
# need to precompute column bounds here before slicing!
col_bounds = [None] * cols
dtypes = [None] * cols
if dim > 1:
for col in range(cols):
dtype = df.dtypes.iloc[coffset + col].kind
dtypes[col] = dtype
if dtype in NUMPY_NUMERIC_TYPES and df.size != 0:
cvalues = df.iloc[:, coffset + col]
bounds = (cvalues.min(), cvalues.max())
else:
bounds = (0, 0)
col_bounds[col] = bounds
else:
dtype = df.dtype.kind
dtypes[0] = dtype
col_bounds[0] = (df.min(), df.max()) if dtype in NUMPY_NUMERIC_TYPES and df.size != 0 else (0, 0)
df = df.iloc[roffset: roffset + rows, coffset: coffset + cols] if dim > 1 else df.iloc[roffset: roffset + rows]
rows = df.shape[0]
cols = df.shape[1] if dim > 1 else 1
def col_to_format(column_type):
return get_column_formatter_by_type(format, column_type)
iat = df.iat if dim == 1 or len(df.columns.unique()) == len(df.columns) else df.iloc
def formatted_row_elements(row):
return get_formatted_row_elements(row, iat, dim, cols, format, dtypes)
xml += header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim)
xml += array_data_to_xml(rows, cols, formatted_row_elements, format)
return xml
def array_data_to_xml(rows, cols, get_row, format):
xml = "<arraydata rows=\"%s\" cols=\"%s\"/>\n" % (rows, cols)
for row in range(rows):
xml += "<row index=\"%s\"/>\n" % row
for value in get_row(row):
xml += var_to_xml(value, '', format=format)
return xml
def slice_to_xml(slice, rows, cols, format, type, bounds):
return '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"%s\" type=\"%s\" max=\"%s\" min=\"%s\"/>' % \
(slice, rows, cols, quote(format), type, bounds[1], bounds[0])
def header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim):
xml = "<headerdata rows=\"%s\" cols=\"%s\">\n" % (rows, cols)
for col in range(cols):
col_label = quote(get_label(df.axes[1].values[col]) if dim > 1 else str(col))
bounds = col_bounds[col]
col_format = "%" + col_to_format(dtypes[col])
xml += '<colheader index=\"%s\" label=\"%s\" type=\"%s\" format=\"%s\" max=\"%s\" min=\"%s\" />\n' % \
(str(col), col_label, dtypes[col], col_to_format(dtypes[col]), col_format % bounds[1], col_format % bounds[0])
for row in range(rows):
xml += "<rowheader index=\"%s\" label = \"%s\"/>\n" % (str(row), get_label(df.axes[0].values[row]))
xml += "</headerdata>\n"
return xml
def is_able_to_format_number(format):
try:
format % math.pi
except Exception:
return False
return True
TYPE_TO_XML_CONVERTERS = {
"ndarray": array_to_xml,
"DataFrame": dataframe_to_xml,
"Series": dataframe_to_xml,
"GeoDataFrame": dataframe_to_xml,
"GeoSeries": dataframe_to_xml
}
def table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format):
_, type_name, _ = get_type(array)
format = format if is_able_to_format_number(format) else '%'
if type_name in TYPE_TO_XML_CONVERTERS:
return "<xml>%s</xml>" % TYPE_TO_XML_CONVERTERS[type_name](array, name, roffset, coffset, rows, cols, format)
else:
raise VariableError("type %s not supported" % type_name)
| apache-2.0 |
syscoin/syscoin | test/functional/mining_getblocktemplate_longpoll.py | 1 | 3612 | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
import random
import threading
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import get_rpc_proxy
from test_framework.wallet import MiniWallet
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
template = node.getblocktemplate({'rules': ['segwit']})
self.longpollid = template['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid': self.longpollid, 'rules': ['segwit']})
class GetBlockTemplateLPTest(SyscoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.supports_cli = False
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.log.info("Test that longpollid doesn't change between successive getblocktemplate() invocations if nothing else happens")
self.nodes[0].generate(10)
template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
longpollid = template['longpollid']
template2 = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert template2['longpollid'] == longpollid
self.log.info("Test that longpoll waits if we do nothing")
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert thr.is_alive()
miniwallets = [ MiniWallet(node) for node in self.nodes ]
self.log.info("Test that longpoll will terminate if another node generates a block")
miniwallets[1].generate(1) # generate a block on another node
# check that thread will exit now that new transaction entered mempool
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
self.log.info("Test that longpoll will terminate if we generate a block ourselves")
thr = LongpollThread(self.nodes[0])
thr.start()
miniwallets[0].generate(1) # generate a block on own node
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
# Add enough mature utxos to the wallets, so that all txs spend confirmed coins
self.nodes[0].generate(100)
self.sync_blocks()
self.log.info("Test that introducing a new transaction into the mempool will terminate the longpoll")
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
fee_rate = min_relay_fee + Decimal('0.00000010') * random.randint(0,20)
miniwallets[0].send_self_transfer(from_node=random.choice(self.nodes),
fee_rate=fee_rate)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert not thr.is_alive()
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
| mit |
tchellomello/home-assistant | homeassistant/auth/permissions/merge.py | 19 | 1786 | """Merging of policies."""
from typing import Dict, List, Set, cast
from .types import CategoryType, PolicyType
def merge_policies(policies: List[PolicyType]) -> PolicyType:
"""Merge policies."""
new_policy: Dict[str, CategoryType] = {}
seen: Set[str] = set()
for policy in policies:
for category in policy:
if category in seen:
continue
seen.add(category)
new_policy[category] = _merge_policies(
[policy.get(category) for policy in policies]
)
cast(PolicyType, new_policy)
return new_policy
def _merge_policies(sources: List[CategoryType]) -> CategoryType:
"""Merge a policy."""
# When merging policies, the most permissive wins.
# This means we order it like this:
# True > Dict > None
#
# True: allow everything
# Dict: specify more granular permissions
# None: no opinion
#
# If there are multiple sources with a dict as policy, we recursively
# merge each key in the source.
policy: CategoryType = None
seen: Set[str] = set()
for source in sources:
if source is None:
continue
# A source that's True will always win. Shortcut return.
if source is True:
return True
assert isinstance(source, dict)
if policy is None:
policy = cast(CategoryType, {})
assert isinstance(policy, dict)
for key in source:
if key in seen:
continue
seen.add(key)
key_sources = []
for src in sources:
if isinstance(src, dict):
key_sources.append(src.get(key))
policy[key] = _merge_policies(key_sources)
return policy
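# Illustrative merge semantics (True > dict > None, per the ordering above):
#
#   merge_policies([{'entities': True}, {'entities': {'domains': True}}])
#   ->  {'entities': True}   # a blanket allow wins over granular permissions
#
#   merge_policies([{'entities': {'domains': True}},
#                   {'entities': {'entity_ids': True}}])
#   ->  {'entities': {'domains': True, 'entity_ids': True}}   # per-key merge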
| apache-2.0 |
robinro/ansible | test/units/plugins/lookup/test_lastpass.py | 153 | 6918 | # (c)2016 Andrew Zenk <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from argparse import ArgumentParser
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.errors import AnsibleError
from ansible.module_utils import six
from ansible.plugins.lookup.lastpass import LookupModule, LPass, LPassException
MOCK_ENTRIES = [{'username': 'user',
'name': 'Mock Entry',
'password': 't0pS3cret passphrase entry!',
'url': 'https://localhost/login',
'notes': 'Test\nnote with multiple lines.\n',
'id': '0123456789'}]
class MockLPass(LPass):
_mock_logged_out = False
_mock_disconnected = False
def _lookup_mock_entry(self, key):
for entry in MOCK_ENTRIES:
if key == entry['id'] or key == entry['name']:
return entry
def _run(self, args, stdin=None, expected_rc=0):
# Mock behavior of lpass executable
base_options = ArgumentParser(add_help=False)
base_options.add_argument('--color', default="auto", choices=['auto', 'always', 'never'])
p = ArgumentParser()
sp = p.add_subparsers(help='command', dest='subparser_name')
logout_p = sp.add_parser('logout', parents=[base_options], help='logout')
show_p = sp.add_parser('show', parents=[base_options], help='show entry details')
field_group = show_p.add_mutually_exclusive_group(required=True)
for field in MOCK_ENTRIES[0].keys():
field_group.add_argument("--{0}".format(field), default=False, action='store_true')
field_group.add_argument('--field', default=None)
show_p.add_argument('selector', help='Unique Name or ID')
args = p.parse_args(args)
def mock_exit(output='', error='', rc=0):
if rc != expected_rc:
raise LPassException(error)
return output, error
if args.color != 'never':
return mock_exit(error='Error: Mock only supports --color=never', rc=1)
if args.subparser_name == 'logout':
if self._mock_logged_out:
return mock_exit(error='Error: Not currently logged in', rc=1)
logged_in_error = 'Are you sure you would like to log out? [Y/n]'
if stdin and stdin.lower() == 'n\n':
return mock_exit(output='Log out: aborted.', error=logged_in_error, rc=1)
elif stdin and stdin.lower() == 'y\n':
return mock_exit(output='Log out: complete.', error=logged_in_error, rc=0)
else:
return mock_exit(error='Error: aborted response', rc=1)
if args.subparser_name == 'show':
if self._mock_logged_out:
return mock_exit(error='Error: Could not find decryption key.' +
' Perhaps you need to login with `lpass login`.', rc=1)
if self._mock_disconnected:
return mock_exit(error='Error: Couldn\'t resolve host name.', rc=1)
mock_entry = self._lookup_mock_entry(args.selector)
if args.field:
return mock_exit(output=mock_entry.get(args.field, ''))
elif args.password:
return mock_exit(output=mock_entry.get('password', ''))
elif args.username:
return mock_exit(output=mock_entry.get('username', ''))
elif args.url:
return mock_exit(output=mock_entry.get('url', ''))
elif args.name:
return mock_exit(output=mock_entry.get('name', ''))
elif args.id:
return mock_exit(output=mock_entry.get('id', ''))
elif args.notes:
return mock_exit(output=mock_entry.get('notes', ''))
raise LPassException('We should never get here')
class DisconnectedMockLPass(MockLPass):
_mock_disconnected = True
class LoggedOutMockLPass(MockLPass):
_mock_logged_out = True
class TestLPass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_lastpass_cli_path(self):
lp = MockLPass(path='/dev/null')
self.assertEqual('/dev/null', lp.cli_path)
def test_lastpass_build_args_logout(self):
lp = MockLPass()
self.assertEqual(['logout', '--color=never'], lp._build_args("logout"))
def test_lastpass_logged_in_true(self):
lp = MockLPass()
self.assertTrue(lp.logged_in)
def test_lastpass_logged_in_false(self):
lp = LoggedOutMockLPass()
self.assertFalse(lp.logged_in)
def test_lastpass_show_disconnected(self):
lp = DisconnectedMockLPass()
with self.assertRaises(LPassException):
lp.get_field('0123456789', 'username')
def test_lastpass_show(self):
lp = MockLPass()
for entry in MOCK_ENTRIES:
entry_id = entry.get('id')
for k, v in six.iteritems(entry):
self.assertEqual(v.strip(), lp.get_field(entry_id, k))
class TestLastpassPlugin(unittest.TestCase):
@patch('ansible.plugins.lookup.lastpass.LPass', new=MockLPass)
def test_lastpass_plugin_normal(self):
lookup_plugin = LookupModule()
for entry in MOCK_ENTRIES:
entry_id = entry.get('id')
for k, v in six.iteritems(entry):
self.assertEqual(v.strip(),
lookup_plugin.run([entry_id], field=k)[0])
@patch('ansible.plugins.lookup.lastpass.LPass', LoggedOutMockLPass)
def test_lastpass_plugin_logged_out(self):
lookup_plugin = LookupModule()
entry = MOCK_ENTRIES[0]
entry_id = entry.get('id')
with self.assertRaises(AnsibleError):
lookup_plugin.run([entry_id], field='password')
@patch('ansible.plugins.lookup.lastpass.LPass', DisconnectedMockLPass)
def test_lastpass_plugin_disconnected(self):
lookup_plugin = LookupModule()
entry = MOCK_ENTRIES[0]
entry_id = entry.get('id')
with self.assertRaises(AnsibleError):
lookup_plugin.run([entry_id], field='password')
| gpl-3.0 |
molebot/brython | www/src/Lib/test/test_importlib/builtin/test_loader.py | 26 | 3341 | import importlib
from importlib import machinery
from .. import abc
from .. import util
from . import util as builtin_util
import sys
import types
import unittest
class LoaderTests(abc.LoaderTests):
"""Test load_module() for built-in modules."""
verification = {'__name__': 'errno', '__package__': '',
'__loader__': machinery.BuiltinImporter}
def verify(self, module):
"""Verify that the module matches against what it should have."""
self.assertIsInstance(module, types.ModuleType)
for attr, value in self.verification.items():
self.assertEqual(getattr(module, attr), value)
self.assertIn(module.__name__, sys.modules)
load_module = staticmethod(lambda name:
machinery.BuiltinImporter.load_module(name))
def test_module(self):
# Common case.
with util.uncache(builtin_util.NAME):
module = self.load_module(builtin_util.NAME)
self.verify(module)
def test_package(self):
# Built-in modules cannot be a package.
pass
def test_lacking_parent(self):
# Built-in modules cannot be a package.
pass
def test_state_after_failure(self):
        # No way to force an import failure.
pass
def test_module_reuse(self):
# Test that the same module is used in a reload.
with util.uncache(builtin_util.NAME):
module1 = self.load_module(builtin_util.NAME)
module2 = self.load_module(builtin_util.NAME)
self.assertIs(module1, module2)
def test_unloadable(self):
name = 'dssdsdfff'
assert name not in sys.builtin_module_names
with self.assertRaises(ImportError) as cm:
self.load_module(name)
self.assertEqual(cm.exception.name, name)
def test_already_imported(self):
# Using the name of a module already imported but not a built-in should
# still fail.
assert hasattr(importlib, '__file__') # Not a built-in.
with self.assertRaises(ImportError) as cm:
self.load_module('importlib')
self.assertEqual(cm.exception.name, 'importlib')
class InspectLoaderTests(unittest.TestCase):
"""Tests for InspectLoader methods for BuiltinImporter."""
def test_get_code(self):
# There is no code object.
result = machinery.BuiltinImporter.get_code(builtin_util.NAME)
self.assertIsNone(result)
def test_get_source(self):
# There is no source.
result = machinery.BuiltinImporter.get_source(builtin_util.NAME)
self.assertIsNone(result)
def test_is_package(self):
# Cannot be a package.
result = machinery.BuiltinImporter.is_package(builtin_util.NAME)
self.assertTrue(not result)
def test_not_builtin(self):
# Modules not built-in should raise ImportError.
for meth_name in ('get_code', 'get_source', 'is_package'):
method = getattr(machinery.BuiltinImporter, meth_name)
with self.assertRaises(ImportError) as cm:
method(builtin_util.BAD_NAME)
            self.assertEqual(cm.exception.name, builtin_util.BAD_NAME)
def test_main():
from test.support import run_unittest
run_unittest(LoaderTests, InspectLoaderTests)
if __name__ == '__main__':
test_main()
| bsd-3-clause |
byterom/android_external_chromium_org | build/get_landmines.py | 26 | 2454 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import sys
import landmine_utils
builder = landmine_utils.builder
distributor = landmine_utils.distributor
gyp_defines = landmine_utils.gyp_defines
gyp_msvs_version = landmine_utils.gyp_msvs_version
platform = landmine_utils.platform
def print_landmines():
"""
ALL LANDMINES ARE EMITTED FROM HERE.
"""
if (distributor() == 'goma' and platform() == 'win32' and
builder() == 'ninja'):
print 'Need to clobber winja goma due to backend cwd cache fix.'
if platform() == 'android':
print 'Clobber: To delete generated mojo class files.'
if platform() == 'win' and builder() == 'ninja':
print 'Compile on cc_unittests fails due to symbols removed in r185063.'
if platform() == 'linux' and builder() == 'ninja':
print 'Builders switching from make to ninja will clobber on this.'
if platform() == 'mac':
print 'Switching from bundle to unbundled dylib (issue 14743002).'
if platform() in ('win', 'mac'):
print ('Improper dependency for create_nmf.py broke in r240802, '
'fixed in r240860.')
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version() == '2012' and
gyp_defines().get('target_arch') == 'x64' and
gyp_defines().get('dcheck_always_on') == '1'):
print "Switched win x64 trybots from VS2010 to VS2012."
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version().startswith('2013')):
print "Switched win from VS2010 to VS2013."
print "Update to VS2013 Update 2."
print 'Need to clobber everything due to an IDL change in r154579 (blink)'
print 'Need to clobber everything due to gen file moves in r175513 (Blink)'
if (platform() != 'ios'):
    print 'Clobber to get rid of obsolete test plugin after r248358'
print 'Clobber to rebuild GN files for V8'
print 'Need to clobber everything due to build_nexe change in nacl r13424'
print '[chromium-dev] PSA: clobber build needed for IDR_INSPECTOR_* compil...'
print 'blink_resources.grd changed: crbug.com/400860'
print 'ninja dependency cycle: crbug.com/408192'
def main():
print_landmines()
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
rmm-fcul/workshops | 2015_graz/binary_choice/two_arenas_real_real/casu_utils.py | 5 | 8116 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
a library of functions used in CASU controller dynamics. Gathers a lot of
previously messy code that is neater collected in one place like this.
RM, Feb 2015
'''
import numpy as np
from assisipy import casu
#import matplotlib.cm as cm
from datetime import datetime
import parsing
import time
### ============= maths ============= ###
#{{{ rolling_avg
def rolling_avg(x, n):
'''
given the sample x, provide a rolling average taking n samples per data point.
NOT a quick solution, but easy...
'''
y = np.zeros((len(x),))
for ctr in range(len(x)):
y[ctr] = np.sum(x[ctr:(ctr+n)])
return y/n
#}}}
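# Illustrative sketch (not part of the original API): rolling_avg uses a
# forward-looking window of n samples, so near the end of the array the window
# runs off the data and the tail values decay toward zero, e.g.
#   rolling_avg(np.ones(5), 2)  ->  array([1.0, 1.0, 1.0, 1.0, 0.5])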
### ============= general behaviour ============= ###
#{{{ measure_ir_sensors
def measure_ir_sensors(mycasu, detect_data):
''' count up sensors that detect a bee, plus rotate history array '''
# don't discriminate between specific directions, so just accumulate all
count = 0
for (val,t) in zip(mycasu.get_ir_raw_value(casu.ARRAY), mycasu.threshold):
if (val > t):
count += 1
#print "raw:",
#print ",".join(["{:.2f}".format(x) for x in mycasu.get_ir_raw_value(casu.ARRAY)])
#mycasu.total_count += count # historical count over all time
detect_data = np.roll(detect_data, 1) # step all positions back
detect_data[0] = count # and overwrite the first entry (this was rolled
# around, so is the oldest entry -- and to become the newest now)
# allow ext usage to apply window -- remain agnostic here during collection.
return detect_data, count
#}}}
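# Illustrative sketch of the history roll (values are hypothetical): with a
# window of length 3 and detect_data == [2, 0, 1], a new reading of 4 active
# sensors yields detect_data == [4, 2, 0]; the oldest sample (1) is dropped.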
#{{{ heater_one_step
def heater_one_step(h):
'''legacy function'''
return detect_bee_proximity_saturated(h)
def detect_bee_proximity_saturated(h):
# measure proximity
detect_data, count = measure_ir_sensors(h, h.detect_data)
h.detect_data = detect_data
# overall bee count for this casu
sat_count = min(h.sat_lim, count) # saturates
return sat_count
#}}}
#{{{ find_mean_ext_temp
def find_mean_ext_temp(h):
r = []
for sensor in [casu.TEMP_F, casu.TEMP_B, casu.TEMP_L, casu.TEMP_R ]:
r.append(h.get_temp(sensor))
if len(r):
mean = sum(r) / float(len(r))
else:
mean = 0.0
return mean
#}}}
### ============= inter-casu comms ============= ###
#{{{ comms functions
def transmit_my_count(h, sat_count, dest='accomplice'):
s = "{}".format(sat_count)
if h.verb > 1:
print "\t[i]==> {} send msg ({} by): '{}' bees, to {}".format(
h._thename, len(s), s, dest)
h.send_message(dest, s)
#TODO: this is non-specific, i.e., any message from anyone is assumed to have
# the right form. For heterogeneous neighbours, we need to check identity as
# well
def recv_all_msgs(h, retry_cnt=0, max_recv=None):
'''
    continue to read the message buffer until no more messages remain.
    returns a list of messages parsed into (src, float) pairs
'''
msgs = []
try_cnt = 0
while(True):
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
src = msg['sender']
bee_cnt = float(txt.split()[0])
msgs.append((src, bee_cnt))
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, {4} from {0} {5}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename,
BLU, ENDC)
if h.verb > 1:
#print dir(msg)
print msg.items()
if(max_recv is not None and len(msgs) >= max_recv):
break
else:
# buffer emptied, return
try_cnt += 1
if try_cnt > retry_cnt:
break
return msgs
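# Illustrative usage (hypothetical CASU handle `h` and sender name, not taken
# from the original file):
#   msgs = recv_all_msgs(h, retry_cnt=2)
#   for src, bee_cnt in msgs:
#       print src, bee_cnt   # e.g. casu-002 3.0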
def recv_neighbour_msg(h):
bee_cnt = 0
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
bee_cnt = int(txt.split()[0])
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
return bee_cnt;
def recv_neighbour_msg_w_src(h):
''' provide the source of a message as well as the message count'''
bee_cnt = 0
src = None
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
src = msg['sender']
bee_cnt = float(txt.split()[0])
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
if h.verb > 1:
#print dir(msg)
print msg.items()
return bee_cnt, src
def recv_neighbour_msg_flt(h):
bee_cnt = 0
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
bee_cnt = float(txt.split()[0])
if h.verb > 1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
return bee_cnt;
#}}}
def find_comms_mapping(name, rtc_path, suffix='-sim', verb=True):
links = parsing.find_comm_link_mapping(
name, rtc_path=rtc_path, suffix=suffix, verb=verb)
if verb:
print "[I] for {}, found the following nodes/edges".format(name)
print "\t", links.items()
print "\n===================================\n\n"
return links
### ============= display ============= ###
#{{{ term codes for colored text
ERR = '\033[41m'
BLU = '\033[34m'
ENDC = '\033[0m'
#}}}
#{{{ color funcs
#def gen_cmap(m='hot', n=32) :
# return cm.get_cmap(m, n) # get LUT with 32 values -- some gradation but see steps
def gen_clr_tgt(new_temp, cmap, tgt=None, min_temp=28.0, max_temp=38.0):
t_rng = float(max_temp - min_temp)
fr = (new_temp - min_temp) / t_rng
i = int(fr * len(cmap))
# compute basic color, if on target
#r,g,b,a = cmap(i)
g = 0.0; b = 0.0; a = 1.0;
i = sorted([0, i, len(cmap)-1])[1]
r = cmap[i]
# now adjust according to distance from target
if tgt is None: tgt=new_temp
dt = np.abs(new_temp - tgt)
dt_r = dt / t_rng
h2 = np.array([r,g,b])
h2 *= (1-dt_r)
return h2
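# Illustrative sketch (hypothetical cmap of red values): with min_temp=28 and
# max_temp=38, gen_clr_tgt(33.0, [0.0, 0.25, 0.5, 0.75, 1.0], tgt=33.0) gives
# fr = 0.5 and i = 2, so r = cmap[2] = 0.5; dt = 0 means no dimming is applied
# and the result is array([0.5, 0.0, 0.0]).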
# a hard-coded colormap that doesn't depend on the presence of
# matplotlib -- deprecating
_clrs = [
(0.2, 0.2, 0.2),
(0.041, 0, 0),
(0.412, 0, 0),
(0.793, 0, 0),
(1, 0.174, 0),
(1, 0.555, 0),
(1, 0.936, 0),
(1, 1, 0.475),
(1, 1, 1),
]
_dflt_clr = (0.2, 0.2, 0.2)
# can access other gradations of colour using M = cm.hot(n) for n steps, then
# either extract them once (`clrs = M(arange(n))`) or each time (`clr_x = M(x)`)
# But here we're going to use 8 steps for all CASUs so no bother.
#}}}
def sep_with_nowtime():
print "# =================== t={} =================== #\n".format(
datetime.now().strftime("%H:%M:%S"))
### ============= more generic ============= ###
#{{{ a struct constructor
# some handy python utilities, from Kier Dugan
class Struct:
def __init__ (self, **kwargs):
self.__dict__.update (kwargs)
def get(self, key, default=None):
return self.__dict__.get(key, default)
def addFields(self, **kwargs):
# add other fields (basically variables) after initialisation
self.__dict__.update (kwargs)
#}}}
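# Illustrative usage of Struct (sketch; the field names are hypothetical):
#   s = Struct(name='casu-001', verb=1)
#   s.addFields(thresh=[0.0] * 7)
#   s.get('missing')     # -> None
#   s.get('verb', 0)     # -> 1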
### calibration
def _calibrate(h, calib_steps, calib_gain=1.1, interval=0.1):
'''
read the sensors several times, and take the highest reading
seen as the threshold.
'''
h._raw_thresh = [0] * 7 # default cases for threshold
for stp in xrange(calib_steps):
for i, v in enumerate(h.get_ir_raw_value(casu.ARRAY)):
if v > h._raw_thresh[i]:
h._raw_thresh[i] = v
time.sleep(interval)
h.thresh = [x*calib_gain for x in h._raw_thresh]
h.threshold = [x*calib_gain for x in h._raw_thresh]
if h.verb:
_ts =", ".join(["{:.2f}".format(x) for x in h.thresh])
print "[I] post-calibration, we have thresh: ", _ts
| lgpl-3.0 |
Lucretiel/autocommand | test/test_automain.py | 1 | 1906 | # Copyright 2014-2016 Nathan West
#
# This file is part of autocommand.
#
# autocommand is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autocommand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
import pytest
from autocommand.automain import automain, AutomainRequiresModuleError
@pytest.mark.parametrize('module_name', ['__main__', True])
def test_name_equals_main_or_true(module_name):
with pytest.raises(SystemExit):
@automain(module_name)
def main():
return 0
def test_name_not_main_or_true():
def main():
return 0
wrapped_main = automain('some_module')(main)
assert wrapped_main is main
def test_invalid_usage():
with pytest.raises(AutomainRequiresModuleError):
@automain
def main():
return 0
def test_args():
main_called = False
with pytest.raises(SystemExit):
@automain(True, args=[1, 2])
def main(a, b):
nonlocal main_called
main_called = True
assert a == 1
assert b == 2
assert main_called
def test_args_and_kwargs():
main_called = False
with pytest.raises(SystemExit):
@automain(True, args=[1], kwargs={'b': 2})
def main(a, b):
nonlocal main_called
main_called = True
assert a == 1
assert b == 2
assert main_called
| lgpl-3.0 |
mhotwagner/backstage | facade/models.py | 1 | 2837 | from django.db import models
from ckeditor.fields import RichTextField
from solo.models import SingletonModel
from phonenumber_field import modelfields as phonenumber_models
from foti.models import Foto
from opere.models import Opera
from scritti.models import Scritto
class Profile(SingletonModel):
name = models.CharField(
max_length=255,
blank=False,
)
_title = models.CharField(
max_length=255,
blank=True,
help_text='Site title used in tab. Defaults to \'name\' if left blank.',
)
tagline = models.CharField(
max_length=515,
blank=True,
help_text='Just a quick description (e.g. "Waddling through the world in search of adventure and snuggles" to go with "Nomad Penguin"',
)
intro = RichTextField(
max_length=1024,
blank=True,
)
bio_title = models.CharField(max_length=64, blank=True)
bio_image = models.ImageField(upload_to='profile', blank=True)
bio = RichTextField(
max_length=4096,
blank=True,
)
# Contact Info
_contact_name = models.CharField(
max_length=64,
blank=True,
help_text='Just in case you didn\'t use your real name up above. You can leave this blank if you want.',
)
address = models.CharField(
max_length=64,
blank=True,
)
city = models.CharField(
max_length=64,
blank=True,
)
state = models.CharField(
max_length=64,
blank=True,
)
country = models.CharField(
max_length=128,
blank=True
)
zip_code = models.CharField(
max_length=16,
blank=True,
help_text='"Postal Code", technically.'
)
email = models.EmailField(
max_length=128,
blank=True
)
phone = phonenumber_models.PhoneNumberField(blank=True)
website = models.URLField(blank=True, help_text='In case you have another one, I guess?')
twitter = models.URLField(blank=True)
facebook = models.URLField(blank=True)
instagram = models.URLField(blank=True)
linkedin = models.URLField(blank=True)
pinterest = models.URLField(blank=True)
tumblr = models.URLField(blank=True)
# Someday we'll change the first one to accept Opera
homepage_features = models.ManyToManyField(Scritto, related_name='facade_homepage_features', help_text='Max of 6!', blank=True)
writing_features = models.ManyToManyField(Scritto, related_name='facade_writing_features', help_text='Max of 6!', blank=True)
photo_features = models.ManyToManyField(Foto, related_name='facade_photo_features', help_text='Max of 6!', blank=True)
@property
def title(self):
return self._title or self.name
@property
def fullname(self):
return self._contact_name or self.name
| mit |
abadger/ansible | lib/ansible/plugins/action/yum.py | 11 | 4713 | # (c) 2018, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleActionFail
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
VALID_BACKENDS = frozenset(('yum', 'yum4', 'dnf'))
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
'''
Action plugin handler for yum3 vs yum4(dnf) operations.
Enables the yum module to use yum3 and/or yum4. Yum4 is a yum
command-line compatibility layer on top of dnf. Since the Ansible
modules for yum(aka yum3) and dnf(aka yum4) call each of yum3 and yum4's
python APIs natively on the backend, we need to handle this here and
pass off to the correct Ansible module to execute on the remote system.
'''
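        # Dispatch sketch (illustrative summary of the logic below, not new
        # behaviour): the resolved backend value maps to an Ansible module as
        #   'yum'  -> ansible.legacy.yum  (yum3)
        #   'yum4' -> ansible.legacy.dnf  (yum4 is a dnf compatibility layer)
        #   'dnf'  -> ansible.legacy.dnf
        #   'auto' -> resolved from ansible_facts.pkg_mgr, then mapped as above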
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
# Carry-over concept from the package action plugin
if 'use' in self._task.args and 'use_backend' in self._task.args:
raise AnsibleActionFail("parameters are mutually exclusive: ('use', 'use_backend')")
module = self._task.args.get('use', self._task.args.get('use_backend', 'auto'))
if module == 'auto':
try:
if self._task.delegate_to: # if we delegate, we should use delegated host's facts
module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
else:
module = self._templar.template("{{ansible_facts.pkg_mgr}}")
except Exception:
pass # could not get it from template!
if module not in VALID_BACKENDS:
facts = self._execute_module(
module_name="ansible.legacy.setup", module_args=dict(filter="ansible_pkg_mgr", gather_subset="!all"),
task_vars=task_vars)
display.debug("Facts %s" % facts)
module = facts.get("ansible_facts", {}).get("ansible_pkg_mgr", "auto")
if (not self._task.delegate_to or self._task.delegate_facts) and module != 'auto':
result['ansible_facts'] = {'pkg_mgr': module}
if module not in VALID_BACKENDS:
result.update(
{
'failed': True,
'msg': ("Could not detect which major revision of yum is in use, which is required to determine module backend.",
"You should manually specify use_backend to tell the module whether to use the yum (yum3) or dnf (yum4) backend})"),
}
)
else:
if module == "yum4":
module = "dnf"
# eliminate collisions with collections search while still allowing local override
module = 'ansible.legacy.' + module
if not self._shared_loader_obj.module_loader.has_plugin(module):
result.update({'failed': True, 'msg': "Could not find a yum module backend for %s." % module})
else:
# run either the yum (yum3) or dnf (yum4) backend module
new_module_args = self._task.args.copy()
if 'use_backend' in new_module_args:
del new_module_args['use_backend']
if 'use' in new_module_args:
del new_module_args['use']
display.vvvv("Running %s as the backend for the yum action plugin" % module)
result.update(self._execute_module(
module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
# Cleanup
if not self._task.async_val:
# remove a temporary path we created
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
| gpl-3.0 |
Brawn1/Humidor_monitoring | libraries/ArduinoJson/third-party/gtest-1.7.0/test/gtest_filter_unittest.py | 2826 | 21261 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' or 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABLED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
args : Arguments to pass to the to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABLED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
qPCR4vir/orange3 | Orange/canvas/canvas/items/utils.py | 3 | 3147 | import numpy
import sip
from PyQt4.QtGui import QColor, QRadialGradient, QPainterPathStroker
from PyQt4.QtCore import QObject, QSignalMapper
from PyQt4.QtCore import pyqtSignal as Signal
def saturated(color, factor=150):
"""Return a saturated color.
"""
h = color.hsvHueF()
s = color.hsvSaturationF()
v = color.valueF()
a = color.alphaF()
s = factor * s / 100.0
s = max(min(1.0, s), 0.0)
return QColor.fromHsvF(h, s, v, a).convertTo(color.spec())
def sample_path(path, num=10):
"""Sample `num` equidistant points from the `path` (`QPainterPath`).
"""
space = numpy.linspace(0.0, 1.0, num, endpoint=True)
return [path.pointAtPercent(float(p)) for p in space]
def radial_gradient(color, color_light=50):
"""
radial_gradient(QColor, QColor)
radial_gradient(QColor, int)
Return a radial gradient. `color_light` can be a QColor or an int.
    In the latter case the light color is derived from `color` using
`saturated(color, color_light)`.
"""
if not isinstance(color_light, QColor):
color_light = saturated(color, color_light)
gradient = QRadialGradient(0.5, 0.5, 0.5)
gradient.setColorAt(0.0, color_light)
gradient.setColorAt(0.5, color_light)
gradient.setColorAt(1.0, color)
gradient.setCoordinateMode(QRadialGradient.ObjectBoundingMode)
return gradient
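# Illustrative usage (sketch): a red node background that is lighter towards
# the centre; the int 150 is passed through saturated() to derive the light
# colour.
#   gradient = radial_gradient(QColor(200, 0, 0), color_light=150)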
def toGraphicsObjectIfPossible(item):
"""Return the item as a QGraphicsObject if possible.
This function is intended as a workaround for a problem with older
versions of PyQt (< 4.9), where methods returning 'QGraphicsItem *'
lose the type of the QGraphicsObject subclasses and instead return
generic QGraphicsItem wrappers.
"""
if item is None:
return None
obj = item.toGraphicsObject()
return item if obj is None else obj
def linspace(count):
"""Return `count` evenly spaced points from 0..1 interval excluding
both end points, e.g. `linspace(3) == [0.25, 0.5, 0.75]`.
"""
return list(map(float, numpy.linspace(0.0, 1.0, count + 2, endpoint=True)[1:-1]))
def uniform_linear_layout(points):
"""Layout the points (a list of floats in 0..1 range) in a uniform
linear space while preserving the existing sorting order.
"""
indices = numpy.argsort(points)
space = numpy.asarray(linspace(len(points)))
# invert the indices
indices = invert_permutation_indices(indices)
# assert((numpy.argsort(points) == numpy.argsort(space[indices])).all())
points = space[indices]
return points.tolist()
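# Illustrative sketch: the sort order of the input is preserved while the
# values are respaced evenly on the open (0, 1) interval, e.g.
#   uniform_linear_layout([0.1, 0.9, 0.2])  ->  [0.25, 0.75, 0.5]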
def invert_permutation_indices(indices):
"""Invert the permutation giver by indices.
"""
inverted = [0] * len(indices)
for i, index in enumerate(indices):
inverted[index] = i
return inverted
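# e.g. invert_permutation_indices([2, 0, 1]) -> [1, 2, 0]; applying the
# inverted indices undoes the permutation described by the input.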
def stroke_path(path, pen):
"""Create a QPainterPath stroke from the `path` drawn with `pen`.
"""
stroker = QPainterPathStroker()
stroker.setCapStyle(pen.capStyle())
stroker.setJoinStyle(pen.joinStyle())
stroker.setMiterLimit(pen.miterLimit())
stroker.setWidth(max(pen.widthF(), 1e-9))
return stroker.createStroke(path)
| bsd-2-clause |
hehongliang/tensorflow | tensorflow/python/kernel_tests/basic_gpu_test.py | 2 | 10569 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.cached_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = sess.run(out)
with self.cached_session(use_gpu=False) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = sess.run(out)
self.assertAllClose(tf_cpu, tf_gpu)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
class MathBuiltinUnaryTest(test.TestCase):
def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.cached_session(use_gpu=use_gpu) as sess:
inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = sess.run(ofunc)
self.assertAllClose(np_out, tf_out)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
data_gt_1 = data + 2 # for x > 1
self._compare(data, np.abs, math_ops.abs, use_gpu)
self._compare(data, np.arccos, math_ops.acos, use_gpu)
self._compare(data, np.arcsin, math_ops.asin, use_gpu)
self._compare(data, np.arcsinh, math_ops.asinh, use_gpu)
self._compare(data_gt_1, np.arccosh, math_ops.acosh, use_gpu)
self._compare(data, np.arctan, math_ops.atan, use_gpu)
self._compare(data, np.ceil, math_ops.ceil, use_gpu)
self._compare(data, np.cos, math_ops.cos, use_gpu)
self._compare(data, np.cosh, math_ops.cosh, use_gpu)
self._compare(data, np.exp, math_ops.exp, use_gpu)
self._compare(data, np.floor, math_ops.floor, use_gpu)
self._compare(data, np.log, math_ops.log, use_gpu)
self._compare(data, np.log1p, math_ops.log1p, use_gpu)
self._compare(data, np.negative, math_ops.negative, use_gpu)
self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
self._compare(data, np.sin, math_ops.sin, use_gpu)
self._compare(data, np.sinh, math_ops.sinh, use_gpu)
self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
self._compare(data, np.square, math_ops.square, use_gpu)
self._compare(data, np.tan, math_ops.tan, use_gpu)
self._compare(data, np.tanh, math_ops.tanh, use_gpu)
self._compare(data, np.arctanh, math_ops.atanh, use_gpu)
def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)
def testFloorDivide(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
np_out = np.floor_divide(x, y + 0.1)
with self.session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
out_func2 = math_ops.floor(ofunc)
tf_out = sess.run(out_func2)
self.assertAllClose(np_out, tf_out)
class BroadcastSimpleTest(test.TestCase):
def _GetGradientArgs(self, xs, ys):
with self.cached_session(use_gpu=True) as sess:
return sess.run(broadcast_gradient_args(xs, ys))
def testBroadcast(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
_GRAD_TOL = {dtypes.float32: 1e-3}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def testGradient(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)
class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
"""Tests concurrent sessions executing on the same GPU."""
def _run_session(self, session, results):
n_iterations = 500
with session as s:
data = variables.Variable(1.0)
with ops.device('/device:GPU:0'):
random_seed.set_random_seed(1)
matrix1 = variables.Variable(
random_ops.truncated_normal([1024, 1]), name='matrix1')
matrix2 = variables.Variable(
random_ops.truncated_normal([1, 1024]), name='matrix2')
x1 = math_ops.multiply(data, matrix1, name='x1')
x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
s.run(variables.global_variables_initializer())
for _ in xrange(n_iterations):
value = s.run(x4)
results.add(value.flat[0])
if len(results) != 1:
break
def testConcurrentSessions(self):
n_threads = 4
threads = []
results = []
for _ in xrange(n_threads):
session = self.session(graph=ops.Graph(), use_gpu=True)
results.append(set())
args = (session, results[-1])
threads.append(threading.Thread(target=self._run_session, args=args))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
flat_results = set([x for x in itertools.chain(*results)])
self.assertEqual(1,
len(flat_results),
'Expected single value, got %r' % flat_results)
if __name__ == '__main__':
test.main()
| apache-2.0 |
moqada/hatena2evernote | h2e.py | 1 | 11014 | # -*- coding: utf-8 -*-
import argparse
import binascii
import datetime
import hashlib
import os
import re
import requests
import time
import urllib
from evernote.api.client import EvernoteClient
from evernote.edam.type import ttypes as Types
from BeautifulSoup import BeautifulSoup
try:
from ConfigParser import SafeConfigParser
except ImportError:
from configparser import SafeConfigParser
HATEBU_URL = 'http://b.hatena.ne.jp/%(username)s/atomfeed'
READABILITY_PARSER_API = (
'https://readability.com/api/content/v1/parser?url=%(url)s&token=%(token)s'
)
ENML_ENABLED_TAGS = (
'a', 'abbr', 'acronym', 'address', 'area', 'b', 'bdo', 'big', 'blockquote',
'br', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'dd', 'del',
'dfn', 'div', 'dl', 'dt', 'em', 'font', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'hr', 'i', 'img', 'ins', 'kbd', 'li', 'map', 'ol', 'p', 'pre', 'q', 's',
'samp', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
'tbody', 'td', 'tfoot', 'th', 'thead', 'title', 'tr', 'tt', 'u', 'ul',
'var', 'xmp'
)
ENML_DISABLED_TAGS_REGEX = re.compile(
r'<(/?)(?!(%s)(\s.*?)?>)[\w_-]+(\s.*?)?>' % '|'.join(ENML_ENABLED_TAGS))
# http://dev.evernote.com/intl/jp/doc/articles/enml.php
# The docs do not explicitly forbid the name attribute, but the API rejected it...
ENML_DISABLED_ATTRIBUTES = (
'rel', 'class', 'id', 'on\w*?', 'frame', 'rules', 'alt', 'datetime',
'accesskey', 'data', 'dynsrc', 'tabindex', 'name',
)
# Global variable that mainly stores tokens etc. for the integrated services
global_config = {}
def fetch_entries(username, date):
""" 指定日付のはてブフィードを取得
"""
def fetch_feed(url):
print 'Fetch: ', url
res = requests.get(url)
return BeautifulSoup(res.text)
def get_date_entries(url, target_date, entries):
""" 対象日のエントリのみを取得する
フィードが対象日以前になるまでページネーションして収集を続ける
"""
soup = fetch_feed(url)
for entry in soup.findAll('entry'):
entry = get_entry(entry)
entry_d = datetime.datetime.fromtimestamp(entry['created']).date()
if target_date < entry_d:
continue
elif target_date > entry_d:
return entries
entries.append(entry)
next_link = soup.find('link', rel='next')
if next_link is not None:
get_date_entries(next_link.get('href'), target_date, entries)
def get_entry(soup_entry):
""" entry要素(BeautifulSoupオブジェクト)から必要な項目をまとめて返す
"""
created = datetime.datetime.strptime(
soup_entry.find('issued').text[:-6], '%Y-%m-%dT%H:%M:%S')
return {
'title': soup_entry.find('title').text,
'summary': soup_entry.find('summary').text or u'',
'url': soup_entry.find('link', rel='related').get('href'),
'tags': [t.text for t in soup_entry.findAll('dc:subject')],
'created': int(time.mktime(created.timetuple())),
}
hb_entries = []
feed_url = HATEBU_URL % {'username': username}
soup = fetch_feed('%s?date=%s' % (feed_url, date))
    # If the title shows an entry count and the target date has at most 20
    # entries, fetch the date feed as-is.
    # (For a while after the date rolls over, the date feed title has no count.)
    # With more than 20 entries, collect the target date's entries from the
    # full feed instead.
title = soup.find('title').text
match = re.search(r'\((\d+)\)$', title)
if match and int(match.group(1)) <= 20:
for entry in soup.findAll('entry'):
hb_entries.append(get_entry(entry))
else:
get_date_entries(
feed_url,
datetime.datetime.strptime(date, '%Y%m%d').date(),
hb_entries)
return hb_entries
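# Usage sketch (hypothetical; nothing in this module calls it this way): fetch
# one day's bookmarks for a made-up user and print them.
#
#     entries = fetch_entries('someuser', '20140101')
#     for e in entries:
#         print e['title'], e['url']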
def to_enml(content, url=''):
""" HTMLをENML形式に変換
"""
enml = re.sub(r'<img(.*?)>', r'<img\1 />', content)
# Strip attributes that ENML does not allow
for attr in ENML_DISABLED_ATTRIBUTES:
enml = re.sub(
r'(<\w+.*?)( %s=".*?")(.*?>)' % attr,
r'\1\3', enml, flags=re.DOTALL)
# width/height attributes on anything other than img are also rejected, so strip them there
for attr in ('width', 'height'):
enml = re.sub(
r'<(?!(img)\s.*?>)(\w+\s.*?)(%s=(\'.*?\'|".*?"))(.*?)>' % attr,
r'<\2\5>', enml, flags=re.DOTALL)
# Empty or relative href values are rejected, so rewrite them
enml = re.sub(
r'(<a.*?)(href="")(.*?>)', r'\1href="#"\3', enml, flags=re.DOTALL)
if url:
pattrn = (
r'\1href="%s\3"\4'
% re.search(r'https?://.*?(/|$)', url).group()
)
else:
pattrn = r'\1href="./"\4'
enml = re.sub(
r'(<a.*?)(href="(/.*?)")(.*?>)', pattrn, enml, flags=re.DOTALL)
# Add an inline style to pre blocks
enml = re.sub(
r'(<pre.*?>)',
r'<pre style="background-color:#EEE;padding:10px;">',
enml)
# Convert disallowed elements into divs
return re.sub(ENML_DISABLED_TAGS_REGEX, r'<\1div>', enml)
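def _example_to_enml():
# Hedged demo (assumed input; not used by the tool itself): disallowed
# attributes such as class are stripped and img tags are self-closed.
html = u'<p class="note"><img src="http://example.com/a.png"></p>'
return to_enml(html, url='http://example.com/entry')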
def img_to_resource(note):
""" 記事中の画像をResourceに変換してNoteに埋め込む
"""
images = {}
for img in re.finditer(r'<img.*?src="(.+?)".*?/>', note.content):
src = img.group(1)
try:
res = urllib.urlopen(src)
binary = res.read()
except Exception:
# On any fetch error, leave the plain img tag as-is
continue
content_type = res.headers.get('content-type', '').split(';')[0]
if content_type.find('image/') != 0:
continue
# Images uploaded from IE sometimes carry a Content-Type of image/pjpeg,
# which Evernote fails to display properly, so normalise it to image/jpeg.
# see: http://blog.netandfield.com/shar/2009/04/imagepjpeg.html
content_type = content_type.replace('pjpeg', 'jpeg')
md5 = hashlib.md5()
md5.update(binary)
binary_hash = md5.digest()
data = Types.Data()
data.size = len(binary)
data.bodyHash = binary_hash
data.body = binary
resource = Types.Resource()
resource.mime = content_type
resource.data = data
# Carry over the width/height information
match = re.search(r'width="(\d+)"', img.group(0))
if match:
resource.width = int(match.group(1))
match = re.search(r'height="(\d+)"', img.group(0))
if match:
resource.height = int(match.group(1))
images[img.group(0)] = resource
# Replace img tags with en-media tags
for k, v in images.items():
hash_hex = binascii.hexlify(v.data.bodyHash)
note.content = note.content.replace(
k,
'<en-media type="%s" hash="%s" width="%s" height="%s"></en-media>'
% (v.mime, hash_hex, v.width or '', v.height or ''))
note.resources = images.values()
return note
def create_note(entry):
""" ブックマーク情報からEvernoteのNoteを作成
"""
client = EvernoteClient(
token=global_config['evernote']['token'], sandbox=False)
note_store = client.get_note_store()
note = Types.Note()
note.title = entry['title']
note.title = note.title.replace(unichr(int('2028', 16)), ' ')
note.title = note.title.replace(unichr(int('2029', 16)), ' ')
note.title = note.title.encode('utf-8')
content = (
u'<?xml version="1.0" encoding="UTF-8"?>'
u'<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">'
)
content += u'<en-note>'
if entry['summary']:
content += u'%s<hr />' % entry['summary']
content += to_enml(entry['content'], url=entry['url'])
content += u'</en-note>'
soup = BeautifulSoup(content)
note.content = str(soup)
attrs = Types.NoteAttributes(sourceURL=entry['url'])
note.attributes = attrs
note.tagNames = [e.encode('utf-8') for e in entry['tags']]
# Evernote expects the timestamp in milliseconds, so multiply by 1000
note.created = entry['created'] * 1000
note = img_to_resource(note)
note_store.createNote(note)
return note
def fetch_readability(url):
""" Readability Parser API から整形したHTMLを取得
"""
res = requests.get(
READABILITY_PARSER_API % {
'url': url,
'token': global_config['readability']['token']
})
res_json = res.json()
if res_json.get('content'):
body = to_unicode(res_json.get('content'))
return body
# When Readability cannot parse the page, say so in the note body
return u'<b>Could not parse the article</b>'
def to_unicode(content):
""" JSONのマルチバイト文字列をunicodeに変換
"""
num = len(content)
words = ''
i = 0
while i < num:
if content[i] == '&':
if content[i:i + 3] == '&#x':
s_hex = ''
for j, c in enumerate(content[i + 3:], 4):
if c == ';':
break
s_hex += c
words += unichr(int(s_hex, 16))
i += j
continue
words += content[i]
i += 1
return words
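def _example_to_unicode():
# Hedged demo (assumed input): Readability's &#x..; hex escapes become
# real unicode characters.
assert to_unicode('caf&#xE9;') == u'caf\xe9'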
def parse_config(filename):
""" 設定ファイル読み込み
"""
fp = os.path.expanduser('~/.h2e')
parser = SafeConfigParser()
parser.read(fp)
global_config.update({
'evernote': {'token': parser.get('evernote', 'token')},
'readability': {'token': parser.get('readability', 'token')},
})
def command():
""" コマンド実行
"""
yesterday = datetime.date.today() - datetime.timedelta(days=1)
parser = argparse.ArgumentParser(
description=u'Save the article bodies of Hatena Bookmark entries to Evernote')
parser.add_argument('hatenaid', help=u'Hatena user name whose bookmarks are collected')
parser.add_argument(
'--date', default=yesterday.strftime('%Y%m%d'),
help=(
u'target date of bookmarks to collect, in YYYYMMDD form, default: yesterday (%s)'
% yesterday.strftime('%Y%m%d')
))
parser.add_argument(
'--config', default='~/.h2e',
help=u'path to the config file, default: ~/.h2e'
)
ns = parser.parse_args()
parse_config(ns.config)
# Run the collection
entries = fetch_entries(ns.hatenaid, ns.date)
print u'Got %s entries' % len(entries)
for entry in entries:
entry['content'] = fetch_readability(entry['url'])
print u'Fetch:', entry['title'], entry['url']
create_note(entry)
if __name__ == '__main__':
command()
| mit |
zooniverse/aggregation | docs/source/conf.py | 1 | 9778 | # -*- coding: utf-8 -*-
#
# Zooniverse Aggregation Engine documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 14 11:15:07 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from mock import Mock as MagicMock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zooniverse Aggregation Engine'
copyright = u'2016, Zooniverse'
author = u'Greg Hines'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.9'
# The full version, including alpha/beta/rc tags.
release = u'0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZooniverseAggregationEnginedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ZooniverseAggregationEngine.tex', u'Zooniverse Aggregation Engine Documentation',
u'Greg Hines', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zooniverseaggregationengine', u'Zooniverse Aggregation Engine Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ZooniverseAggregationEngine', u'Zooniverse Aggregation Engine Documentation',
author, 'ZooniverseAggregationEngine', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
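# The Mock subclass above lets autodoc import this project on a machine
# without its heavy numeric dependencies: any attribute lookup on a mocked
# module just returns another Mock, so nested imports succeed.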
# note: the cassandra-driver package is imported as "cassandra"
MOCK_MODULES = ['shapely', 'pandas', 'numpy', 'scipy', 'cassandra', 'sklearn']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) | apache-2.0 |
bbqsrc/kbdgen | pysrc/kbdgen/gen/osxutil.py | 2 | 30141 | import copy
import json
import uuid
import pathlib
import itertools
import subprocess
import re
from collections import OrderedDict
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import Element, SubElement
from ..bundle import parse_desktop_layout
from ..base import get_logger
from ..cldr import CP_REGEX
logger = get_logger(__name__)
OSX_KEYMAP = OrderedDict(
(
("C01", "0"),
("C02", "1"),
("C03", "2"),
("C04", "3"),
("C06", "4"),
("C05", "5"),
("B01", "6"),
("B02", "7"),
("B03", "8"),
("B04", "9"),
("B00", "50"), # E00 flipped!
("B05", "11"),
("D01", "12"),
("D02", "13"),
("D03", "14"),
("D04", "15"),
("D06", "16"),
("D05", "17"),
("E01", "18"),
("E02", "19"),
("E03", "20"),
("E04", "21"),
("E06", "22"),
("E05", "23"),
("E12", "24"),
("E09", "25"),
("E07", "26"),
("E11", "27"),
("E08", "28"),
("E10", "29"),
("D12", "30"),
("D09", "31"),
("D07", "32"),
("D11", "33"),
("D08", "34"),
("D10", "35"),
# U WOT 36 - space yeah yeah
("C09", "37"),
("C07", "38"),
("C11", "39"),
("C08", "40"),
("C10", "41"),
("D13", "42"),
("B08", "43"),
("B10", "44"),
("B06", "45"),
("B07", "46"),
("B09", "47"),
# U WOT 48 - backspace yeah yeah
("A03", "49"),
("E00", "10"), # B00 flipped!
("E13", "93"),
("B11", "94"),
)
)
OSX_HARDCODED = OrderedDict(
(
("36", r"\u{D}"),
("48", r"\u{9}"),
("51", r"\u{8}"),
("53", r"\u{1B}"),
("64", r"\u{10}"),
("66", r"\u{1D}"),
("70", r"\u{1C}"),
("71", r"\u{1B}"),
("72", r"\u{1F}"),
("76", r"\u{3}"),
("77", r"\u{1E}"),
("79", r"\u{10}"),
("80", r"\u{10}"),
("96", r"\u{10}"),
("97", r"\u{10}"),
("98", r"\u{10}"),
("99", r"\u{10}"),
("100", r"\u{10}"),
("101", r"\u{10}"),
("103", r"\u{10}"),
("105", r"\u{10}"),
("106", r"\u{10}"),
("107", r"\u{10}"),
("109", r"\u{10}"),
("111", r"\u{10}"),
("113", r"\u{10}"),
("114", r"\u{5}"),
("115", r"\u{1}"),
("116", r"\u{B}"),
("117", r"\u{7F}"),
("118", r"\u{10}"),
("119", r"\u{4}"),
("120", r"\u{10}"),
("121", r"\u{C}"),
("122", r"\u{10}"),
("123", r"\u{1C}"),
("124", r"\u{1D}"),
("125", r"\u{1F}"),
("126", r"\u{1E}"),
)
)
def plutil_get_json(path):
cmd = "plutil -convert json -o -".split(" ")
cmd.append(path)
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
json_str = process.communicate()[0].decode()
return json.loads(json_str, object_pairs_hook=OrderedDict)
def plutil_to_xml_str(json_obj):
cmd = "plutil -convert xml1 -o - -".split(" ")
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return process.communicate(json.dumps(json_obj).encode())[0].decode()
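# Hedged round-trip sketch (requires the macOS plutil tool on PATH; the
# project path below is hypothetical):
#
#     proj = plutil_get_json('MyApp.xcodeproj/project.pbxproj')
#     xml_text = plutil_to_xml_str(proj)   # back to an XML plist string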
class Pbxproj:
@staticmethod
def gen_key():
return uuid.uuid4().hex[8:].upper()
def __init__(self, path):
self._proj = plutil_get_json(path)
def __str__(self):
return plutil_to_xml_str(self._proj)
@property
def objects(self):
return self._proj["objects"]
@property
def root(self):
return self.objects[self._proj["rootObject"]]
@property
def main_group(self):
return self.objects[self.root["mainGroup"]]
def find_ref_for_name(self, name, isa=None):
logger.trace("find_ref_for_name: %s %r" % (name, isa))
for ref, o in self.objects.items():
if o.get("name", None) == name and (
isa is None or o.get("isa", None) == isa
):
return ref
return None
def find_resource_build_phase(self, target_name):
logger.trace("find_resource_build_phase: %s" % target_name)
targets = [self.objects[t] for t in self.root["targets"]]
target = None
for t in targets:
if t["name"] == target_name:
target = t
break
if target is None:
return None
for build_phase in target["buildPhases"]:
phase = self.objects[build_phase]
if phase["isa"] == "PBXResourcesBuildPhase":
return phase
return None
def create_plist_string_variant(self, variants):
o = {
"isa": "PBXVariantGroup",
"children": variants,
"name": "InfoPlist.strings",
"sourceTree": "<group>",
}
return o
def add_plist_strings(self, locales):
plist_strs = [self.create_plist_string_file(l) for l in locales]
variant = self.create_plist_string_variant(plist_strs)
var_key = Pbxproj.gen_key()
self.objects[var_key] = variant
key = Pbxproj.gen_key()
self.objects[key] = {"isa": "PBXBuildFile", "fileRef": var_key}
return (var_key, key)
def add_plist_strings_to_build_phase(self, locales, target_name):
phase = self.find_resource_build_phase(target_name)
(var_ref, ref) = self.add_plist_strings(locales)
phase["files"].append(ref)
return var_ref
def find_variant_group(self, target):
for o in self.objects.values():
if (
o.get("isa", None) == "PBXVariantGroup"
and o.get("name", None) == target
):
break
else:
raise Exception("No src found.")
return o
def set_target_build_setting(self, target, key, value):
logger.trace("set_target_build_setting: %r %r %r" % (target, key, value))
o = self.find_target(target)
build_cfg_list = self.objects[o["buildConfigurationList"]]
build_cfgs = [self.objects[x] for x in build_cfg_list["buildConfigurations"]]
for cfg in build_cfgs:
cfg["buildSettings"][key] = value
def set_target_package_id(self, target, new_id):
logger.trace("set_target_package_id: %r %r" % (target, new_id))
o = self.find_target(target)
build_cfg_list = self.objects[o["buildConfigurationList"]]
build_cfgs = [self.objects[x] for x in build_cfg_list["buildConfigurations"]]
for cfg in build_cfgs:
cfg["buildSettings"]["PRODUCT_BUNDLE_IDENTIFIER"] = new_id
def add_file_ref_to_variant_group(self, file_ref, variant_name):
variant = self.find_variant_group(variant_name)
variant["children"].append(file_ref)
return variant
def add_plist_strings_to_variant_group(self, locales, variant_name, target_name):
variant = self.find_variant_group(variant_name)
o = []
for locale in locales:
ref = self.create_plist_string_file(locale, target_name)
variant["children"].append(ref)
o.append(ref)
return o
def add_ref_to_group(self, ref, group_list):
logger.trace("add_ref_to_group: %r %r" % (ref, group_list))
o = self.main_group
n = False
for g in group_list:
for c in o["children"]:
co = self.objects[c]
if n:
break
if co.get("path", co.get("name", None)) == g:
o = co
n = True
if n:
n = False
continue
else:
# Create a new intermediate group under a fresh key; reusing the
# `ref` parameter here would shadow the file reference that still
# has to be appended at the end of this method
group_ref = Pbxproj.gen_key()
self.objects[group_ref] = {
"isa": "PBXGroup",
"children": [],
"path": g,
"sourceTree": "<group>",
}
o["children"].append(group_ref)
n = False
o = self.objects[group_ref]
continue
o["children"].append(ref)
return True
def create_file_reference(self, file_type, locale, name, **kwargs):
logger.trace(
"create_file_reference: %r %r %r %r" % (file_type, locale, name, kwargs)
)
o = {
"isa": "PBXFileReference",
"lastKnownFileType": file_type,
"name": locale,
"path": "%s.lproj/%s" % (locale, name),
"sourceTree": "<group>",
}
o.update(kwargs)
k = Pbxproj.gen_key()
self.objects[k] = o
return k
def create_plist_file(self, plist_path):
logger.trace("create_plist_file: %r" % plist_path)
o = {
"isa": "PBXFileReference",
"lastKnownFileType": "text.plist.xml",
"name": pathlib.Path(plist_path).name,
"path": plist_path,
"sourceTree": "<group>",
}
k = Pbxproj.gen_key()
self.objects[k] = o
return k
def create_plist_string_file(self, locale, name="InfoPlist.strings"):
return self.create_file_reference("text.plist.strings", locale, name)
def create_text_file(self, locale, name):
return self.create_file_reference("text", locale, name)
def add_path(self, path_list, target=None):
if target is None:
target = self.main_group
for name in path_list:
children = [self.objects[r] for r in target["children"]]
for c in children:
if c.get("path", None) == name:
target = c
break
else:
ref = Pbxproj.gen_key()
o = {
"children": [],
"isa": "PBXGroup",
"path": name,
"sourceTree": "<group>",
}
self.objects[ref] = o
target["children"].append(ref)
target = self.objects[ref]
def clear_target_dependencies(self, target):
for o in self.objects.values():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
break
else:
raise Exception("No src found.")
# HACK: unclear; leaves dangling nodes
o["dependencies"] = []
def clear_target_embedded_binaries(self, target):
for o in self.objects.values():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
break
else:
raise Exception("No src found.")
target_o = o
for o in [self.objects[x] for x in target_o["buildPhases"]]:
if (
o.get("isa", None) == "PBXCopyFilesBuildPhase"
and o.get("name", None) == "Embed App Extensions"
):
break
else:
raise Exception("No src found.")
o["files"] = []
def create_container_item_proxy(self, container_portal, remote_ref, info):
ref = Pbxproj.gen_key()
self.objects[ref] = {
"isa": "PBXContainerItemProxy",
"containerPortal": container_portal,
"proxyType": "1",
"remoteGlobalIDString": remote_ref,
"remoteInfo": info,
}
logger.debug(self.objects[ref])
return ref
def create_target_dependency(self, proxy_ref, dep_ref):
ref = Pbxproj.gen_key()
self.objects[ref] = {
"isa": "PBXTargetDependency",
"targetProxy": proxy_ref,
"target": dep_ref,
}
logger.debug(self.objects[ref])
return ref
def add_dependency_to_target(self, target_ref, dep_ref):
target = self.objects[target_ref]
if target.get("dependencies", None) is None:
target["dependencies"] = []
target["dependencies"].append(dep_ref)
logger.debug(target)
def add_appex_to_target_dependencies(self, appex, target):
logger.debug("add_appex_to_target_dependencies: %s %s" % (appex, target))
# Find target
appex_ref = self.find_ref_for_name(appex, isa="PBXNativeTarget")
logger.debug("Appex ref: " + appex_ref)
# Create container proxy
proxy_ref = self.create_container_item_proxy(
self._proj["rootObject"], appex_ref, appex
)
logger.debug("Proxy ref: " + proxy_ref)
# Create target dependency
dep_ref = self.create_target_dependency(proxy_ref, appex_ref)
logger.debug("Target dep ref: " + dep_ref)
# Add to deps
target_ref = self.find_ref_for_name(target, isa="PBXNativeTarget")
logger.debug(target_ref)
self.add_dependency_to_target(target_ref, dep_ref)
def remove_appex_from_target_embedded_binaries(self, appex, target):
logger.trace(
"remove_appex_from_target_embedded_binaries: %s %s" % (appex, target)
)
for appex_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXFileReference"
and o.get("path", None) == appex
):
break
else:
raise Exception("No appex src found.")
logger.trace("appex_ref: %r" % appex_ref)
for appex_file_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXBuildFile"
and o.get("fileRef", None) == appex_ref
):
break
else:
raise Exception("No appex src found.")
for appex_native_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("productReference", None) == appex_ref
):
break
else:
raise Exception("No target src found.")
for native_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
break
else:
raise Exception("No target src found.")
logger.trace("native_ref: %r" % native_ref)
target_o = o
for o in [self.objects[x] for x in target_o["buildPhases"]]:
if (
o.get("isa", None) == "PBXCopyFilesBuildPhase"
and o.get("name", None) == "Embed App Extensions"
):
break
else:
raise Exception("No src found.")
# native_target = o
for target_dep_ref, o in self.objects.items():
if o.get("isa", None) == "PBXTargetDependency":
logger.trace(o)
if (
o.get("isa", None) == "PBXTargetDependency"
and o.get("target", None) == appex_native_ref
):
break
else:
raise Exception("No dependency target src found.")
# target_dep = o
target_o["dependencies"].remove(target_dep_ref)
for o in [self.objects[x] for x in target_o["buildPhases"]]:
if (
o.get("isa", None) == "PBXCopyFilesBuildPhase"
and o.get("name", None) == "Embed App Extensions"
):
o["files"].remove(appex_file_ref)
break
else:
raise Exception("No src found.")
# del self.objects[appex_ref]
def add_appex_to_target_embedded_binaries(self, appex, target):
logger.trace("add_appex_to_target_embedded_binaries: %s %s" % (appex, target))
for appex_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXFileReference"
and o.get("path", None) == appex
):
break
else:
raise Exception("No appex src found.")
for o in self.objects.values():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
break
else:
raise Exception("No target src found.")
target_o = o
for o in [self.objects[x] for x in target_o["buildPhases"]]:
if (
o.get("isa", None) == "PBXCopyFilesBuildPhase"
and o.get("name", None) == "Embed App Extensions"
):
break
else:
raise Exception("No src found.")
ref = Pbxproj.gen_key()
appex_o = {
"isa": "PBXBuildFile",
"fileRef": appex_ref,
"settings": {"ATTRIBUTES": ["RemoveHeadersOnCopy"]},
}
self.objects[ref] = appex_o
o["files"].append(ref)
def find_target(self, target):
for o in self.objects.values():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
return o
else:
raise Exception("No src found.")
def add_source_ref_to_build_phase(self, ref, target):
logger.trace("add_source_ref_to_build_phase: %r %r" % (ref, target))
target_o = self.find_target(target)
for o in [self.objects[x] for x in target_o["buildPhases"]]:
if o.get("isa", None) == "PBXSourcesBuildPhase":
break
else:
raise Exception("No src found.")
nref = Pbxproj.gen_key()
self.objects[nref] = {"isa": "PBXBuildFile", "fileRef": ref}
o["files"].append(nref)
def remove_target(self, target):
logger.trace("remove_target: %r" % target)
for ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
break
else:
raise Exception("No src found.")
prod_ref = o["productReference"]
logger.trace("remove_target productReference: %r" % prod_ref)
del self.objects[o["productReference"]]
delete_refs = []
for target_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXTargetDependency"
and o.get("target", None) == ref
):
delete_refs.append(target_ref)
for dref in delete_refs:
del self.objects[dref]
for nref, o in self.objects.items():
if (
o.get("isa", None) == "PBXBuildFile"
and o.get("fileRef", None) == prod_ref
):
break
else:
raise Exception("No src found.")
for o in self.objects.values():
if o.get("isa", None) == "PBXGroup" and o.get("name", None) == "Products":
break
else:
raise Exception("No src found.")
o["children"].remove(prod_ref)
self.root["targets"].remove(ref)
del self.objects[ref]
def duplicate_target(self, src_name, dst_name, plist_path):
logger.trace("duplicate_target: %r %r %r" % (src_name, dst_name, plist_path))
for o in self.objects.values():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == src_name
):
break
else:
raise Exception("No src found.")
base_clone = copy.deepcopy(o)
base_ref = Pbxproj.gen_key()
self.objects[base_ref] = base_clone
base_clone["name"] = dst_name
conf_ref = Pbxproj.gen_key()
conf_clone = copy.deepcopy(self.objects[base_clone["buildConfigurationList"]])
self.objects[conf_ref] = conf_clone
base_clone["buildConfigurationList"] = conf_ref
new_confs = []
for conf in conf_clone["buildConfigurations"]:
ref = Pbxproj.gen_key()
new_confs.append(ref)
self.objects[ref] = copy.deepcopy(self.objects[conf])
self.objects[ref]["buildSettings"]["INFOPLIST_FILE"] = plist_path
self.objects[ref]["buildSettings"]["PRODUCT_NAME"] = dst_name
self.objects[ref]["buildSettings"]["CODE_SIGN_STYLE"] = "Manual"
self.objects[ref]["buildSettings"]["ENABLE_BITCODE"] = "NO"
conf_clone["buildConfigurations"] = new_confs
appex_ref = Pbxproj.gen_key()
appex_clone = copy.deepcopy(self.objects[base_clone["productReference"]])
self.objects[appex_ref] = appex_clone
appex_clone["path"] = "%s.appex" % dst_name
base_clone["productReference"] = appex_ref
# PBXContainerItemProxy etc seem unaffected by leaving dependencies in
# base_clone['dependencies'] = []
self.add_ref_to_group(appex_ref, ["Products"])
self.root["targets"].append(base_ref)
return base_clone, appex_ref
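# Illustrative Pbxproj workflow (the target and file names are made up):
#
#     proj = Pbxproj('HostingApp.xcodeproj/project.pbxproj')
#     clone, appex_ref = proj.duplicate_target(
#         'KeyboardBase', 'MyKeyboard', 'MyKeyboard/Info.plist')
#     proj.add_appex_to_target_embedded_binaries('MyKeyboard.appex', 'HostingApp')
#     with open('project.pbxproj', 'w') as f:
#         f.write(str(proj))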
def generate_osx_mods():
conv = OrderedDict(
(
("cmd", "command"),
("caps", "caps"),
("alt", "anyOption"),
("shift", "anyShift"),
)
)
def gen_conv(tpl):
tplo = []
for t, v in conv.items():
if t not in tpl:
v += "?"
tplo.append(v)
return tuple(tplo)
m = ("caps", "alt", "shift")
mods = (x for i in range(len(m)) for x in itertools.combinations(m, i))
o = OrderedDict()
for mod in mods:
mod = ("cmd",) + mod
o["+".join(mod)] = (" ".join(gen_conv(mod)),)
return o
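# For example, generate_osx_mods()['cmd+shift'] evaluates to
# ('command caps? anyOption? anyShift',): every combination is anchored on
# the command key, with unused modifiers made optional via '?'.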
class OSXKeyLayout:
doctype = (
'<!DOCTYPE keyboard PUBLIC "" '
+ '"file://localhost/System/Library/DTDs/KeyboardLayout.dtd">'
)
modes = OrderedDict(
(
("default", ("command?",)),
("shift", ("anyShift caps? command?",)),
("caps", ("caps",)),
("caps+shift", ("caps anyShift",)),
("alt", ("anyOption command?",)),
("alt+shift", ("anyOption anyShift caps? command?",)),
("caps+alt", ("caps anyOption command?",)),
("caps+alt+shift", ("caps anyOption anyShift command?",)),
("ctrl", ("anyShift? caps? anyOption? anyControl",)),
("cmd", ("command",)),
("cmd+shift", ("command anyShift",)),
)
)
modes.update(generate_osx_mods())
# TODO unused
required = ("default", "shift", "caps")
DEFAULT_CMD = parse_desktop_layout(
r"""
§ 1 2 3 4 5 6 7 8 9 0 - =
q w e r t y u i o p [ ]
a s d f g h j k l ; ' \
` z x c v b n m , . /
"""
)
DEFAULT_CMD_SHIFT = parse_desktop_layout(
r"""
± ! @ # $ % ^ & * ( ) _ +
Q W E R T Y U I O P { }
A S D F G H J K L : " |
~ Z X C V B N M < > ?
"""
)
def __bytes__(self):
"""XML almost; still encode the control chars. Death to standards!"""
# Convert
v = CP_REGEX.sub(lambda x: "&#x%04X;" % int(x.group(1), 16), str(self))
v = re.sub(
r"&(quot|amp|apos|lt|gt);",
lambda x: {
""": """,
"&": "&",
"'": "'",
"<": "<",
">": ">",
}[x.group(0)],
v,
)
return ('<?xml version="1.1" encoding="UTF-8"?>\n%s' % v).encode("utf-8")
def __str__(self):
root = copy.deepcopy(self.elements["root"])
actions = root.findall("actions")[0]
terminators = root.findall("terminators")[0]
if len(actions) == 0:
root.remove(actions)
if len(terminators) == 0:
root.remove(terminators)
return self.doctype + etree.tostring(
root, encoding="unicode"
)
def __init__(self, name, id_):
modifiers_ref = "modifiers"
mapset_ref = "default"
self.elements = {}
root = Element("keyboard", group="126", id=id_, name=name)
self.elements["root"] = root
self.elements["layouts"] = SubElement(root, "layouts")
SubElement(
self.elements["layouts"],
"layout",
first="0",
last="17",
mapSet=mapset_ref,
modifiers=modifiers_ref,
)
self.elements["modifierMap"] = SubElement(
root, "modifierMap", id=modifiers_ref, defaultIndex="0"
)
self.elements["keyMapSet"] = SubElement(root, "keyMapSet", id=mapset_ref)
self.elements["actions"] = SubElement(root, "actions")
self.elements["terminators"] = SubElement(root, "terminators")
self.key_cache = {}
self.kmap_cache = {}
self.action_cache = {}
class KeyIncrementer:
def __init__(self, prefix):
self.prefix = prefix
self.data = {}
self.c = 0
def has(self, key):
return key in self.data
def get(self, key):
if self.data.get(key, None) is None:
self.data[key] = self.c
self.c += 1
return "%s%03d" % (self.prefix, self.data[key])
self.states = KeyIncrementer("s")
self.actions = KeyIncrementer("a")
self._n = 0
def _add_modifier_map(self, mode):
mm = self.elements["modifierMap"]
kms = self.elements["keyMapSet"]
node = SubElement(mm, "keyMapSelect", mapIndex=str(self._n))
mods = self.modes.get(mode, None)
for mod in mods:
SubElement(node, "modifier", keys=mod)
self.kmap_cache[mode] = SubElement(kms, "keyMap", index=str(self._n))
self._n += 1
return self.kmap_cache[mode]
def _get_kmap(self, mode):
kmap = self.kmap_cache.get(mode, None)
if kmap is not None:
return kmap
return self._add_modifier_map(mode)
def _set_key(self, mode, key, key_id, action=None, output=None):
if action is not None and output is not None:
raise Exception("Cannot specify contradictory action and output.")
key_key = "%s %s" % (mode, key_id)
node = self.key_cache.get(key_key, None)
if node is None:
kmap_node = self._get_kmap(mode)
node = SubElement(kmap_node, "key", code=key_id)
self.key_cache[key_key] = node
if action is not None:
node.attrib["action"] = str(action)
if node.attrib.get("output", None) is not None:
del node.attrib["output"]
elif output is not None:
node.attrib["output"] = str(output)
if node.attrib.get("action", None) is not None:
del node.attrib["action"]
def _set_default_action(self, key):
action_id = self.actions.get(key) # "Key %s" % key
action = self.action_cache.get(action_id, None)
if action is None:
action = SubElement(self.elements["actions"], "action", id=action_id)
self.action_cache[action_id] = action
def _set_terminator(self, action_id, output):
termin = self.elements["terminators"].findall(
'when[@state="%s"]' % action_id.replace('"', r"&quot;")
)
if len(termin) == 0:
el = SubElement(self.elements["terminators"], "when")
el.set("state", action_id)
el.set("output", output)
def _set_default_transform(self, action_id, output):
action = self.action_cache.get(action_id, None)
# TODO create a generic create or get method for actions
if action is None:
logger.trace(
"Create default action - action:%r output:%r" % (action_id, output)
)
action = SubElement(self.elements["actions"], "action", id=action_id)
self.action_cache[action_id] = action
if len(action.findall('when[@state="none"]')) == 0:
logger.trace(
"Create 'none' when - action:%r output:%r" % (action_id, output)
)
el = SubElement(action, "when")
el.set("state", "none")
el.set("output", output)
def set_key(self, mode, key, key_id):
self._set_key(mode, key, key_id, output=key)
def set_deadkey(self, mode, key, key_id, output):
"""output is the output when the deadkey is followed by an invalid"""
logger.trace("%r %r %r %r" % (mode, key, key_id, output))
action_id = self.actions.get(key) # "Key %s" % key
pressed_id = self.states.get(key) # "State %s" % key
self._set_key(mode, key, key_id, action=action_id)
# Create default action (set to pressed state)
self._set_default_action(key)
self._set_terminator(pressed_id, output)
def set_transform_key(self, mode, key, key_id):
action_id = self.actions.get(key) # "Key %s" % key
self._set_key(mode, key, key_id, action=action_id)
# Find action, add none state (move the output)
self._set_default_transform(action_id, key)
def add_transform(self, action_id, state, output=None, next=None):
action = self.action_cache.get(action_id, None)
if action is None:
raise Exception("'%s' is not a known action_id." % action_id)
if output is not None and next is not None:
raise Exception("Output and next cannot be simultaneously defined.")
if output is not None:
el = SubElement(action, "when")
el.set("state", state)
el.set("output", output)
elif next is not None:
el = SubElement(action, "when")
el.set("state", state)
el.set("next", next)
# logger.trace("%r" % el)
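# Minimal OSXKeyLayout sketch (the layout name, id and key codes are
# illustrative only):
#
#     layout = OSXKeyLayout('My Layout', '-1337')
#     layout.set_key('default', 'a', '0')          # key code 0 emits 'a'
#     layout.set_deadkey('default', u'`', '50', u'`')
#     with open('My Layout.keylayout', 'wb') as f:
#         f.write(bytes(layout))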
| apache-2.0 |
kavardak/suds | suds/mx/encoded.py | 211 | 4651 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides encoded I{marshaller} classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.mx.literal import Literal
from suds.mx.typer import Typer
from suds.sudsobject import Factory, Object
from suds.xsd.query import TypeQuery
log = getLogger(__name__)
#
# Add encoded extensions
# aty = The soap (section 5) encoded array type.
#
Content.extensions.append('aty')
class Encoded(Literal):
"""
A SOAP section (5) encoding marshaller.
This marshaller supports rpc/encoded soap styles.
"""
def start(self, content):
#
# For soap encoded arrays, the 'aty' (array type) information
# is extracted and added to the 'content'. Then, the content.value
# is replaced with an object containing an 'item=[]' attribute
# containing values that are 'typed' suds objects.
#
start = Literal.start(self, content)
if start and isinstance(content.value, (list,tuple)):
resolved = content.type.resolve()
for c in resolved:
if hasattr(c[0], 'aty'):
content.aty = (content.tag, c[0].aty)
self.cast(content)
break
return start
def end(self, parent, content):
#
# For soap encoded arrays, the soapenc:arrayType attribute is
# added with proper type and size information.
# Eg: soapenc:arrayType="xs:int[3]"
#
Literal.end(self, parent, content)
if content.aty is None:
return
tag, aty = content.aty
ns0 = ('at0', aty[1])
ns1 = ('at1', 'http://schemas.xmlsoap.org/soap/encoding/')
array = content.value.item
child = parent.getChild(tag)
child.addPrefix(ns0[0], ns0[1])
child.addPrefix(ns1[0], ns1[1])
name = '%s:arrayType' % ns1[0]
value = '%s:%s[%d]' % (ns0[0], aty[0], len(array))
child.set(name, value)
def encode(self, node, content):
if content.type.any():
Typer.auto(node, content.value)
return
if content.real.any():
Typer.auto(node, content.value)
return
ns = None
name = content.real.name
if self.xstq:
ns = content.real.namespace()
Typer.manual(node, name, ns)
def cast(self, content):
"""
Cast the I{untyped} list items found in content I{value}.
Each items contained in the list is checked for XSD type information.
Items (values) that are I{untyped}, are replaced with suds objects and
type I{metadata} is added.
@param content: The content holding the collection.
@type content: L{Content}
@return: self
@rtype: L{Encoded}
"""
aty = content.aty[1]
resolved = content.type.resolve()
array = Factory.object(resolved.name)
array.item = []
query = TypeQuery(aty)
ref = query.execute(self.schema)
if ref is None:
raise TypeNotFound(aty)
for x in content.value:
if isinstance(x, (list, tuple)):
array.item.append(x)
continue
if isinstance(x, Object):
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
continue
if isinstance(x, dict):
x = Factory.object(ref.name, x)
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
continue
x = Factory.property(ref.name, x)
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
content.value = array
return self
| lgpl-3.0 |
JuanMatSa/PyFME | src/pyfme/models/tests/test_euler_flat_earth.py | 5 | 5115 | # -*- coding: utf-8 -*-
"""
Tests of the equations of the Euler flat-Earth model.
"""
import numpy as np
from pyfme.models.euler_flat_earth import (lamceq,
lamceq_jac,
kaeq,
kaeq_jac,
kleq)
def test1_linear_and_angular_momentum_eqs():
time = 0
vel = np.array([1, 1, 1, 1, 1, 1], dtype=float)
mass = 10
inertia = np.array([[1000, 0, -100],
[ 0, 100, 0],
[-100, 0, 100]], dtype=float)
forces = np.array([100., 100., 100.], dtype=float)
moments = np.array([100., 1000., 100], dtype=float)
expected_sol = np.array([10, 10, 10, 11./9, 1, 92./9], dtype=float)
sol = lamceq(time, vel, mass, inertia, forces,
moments)
assert(np.allclose(expected_sol, sol))
def test2_linear_and_angular_momentum_eqs():
time = 0
vel = np.array([1, 0, 1, 0, 1, 0], dtype=float)
mass = 10
inertia = np.array([[100, 0, -10],
[ 0, 100, 0],
[-10, 0, 100]], dtype=float)
forces = np.array([1000, 10, 10], dtype=float)
moments = np.array([100, 100, 100], dtype=float)
expected_sol = np.array([99, 1, 2, 10./9, 1, 10./9], dtype=float)
sol = lamceq(time, vel, mass, inertia, forces,
moments)
assert(np.allclose(expected_sol, sol))
def test1_jac_linear_and_angular_momentum_eqs():
time = 0
vel = np.array([1, 1, 1, 1, 1, 1], dtype=float)
mass = 10
inertia = np.array([[1000, 0, -100],
[ 0, 100, 0],
[-100, 0, 100]], dtype=float)
expected_sol = np.zeros([6, 6], dtype=float)
expected_sol[0, 1] = 1
expected_sol[0, 2] = - 1
expected_sol[0, 4] = - 1
expected_sol[0, 5] = 1
expected_sol[1, 0] = - 1
expected_sol[1, 2] = 1
expected_sol[1, 3] = 1
expected_sol[1, 5] = - 1
expected_sol[2, 0] = 1
expected_sol[2, 1] = - 1
expected_sol[2, 3] = - 1
expected_sol[2, 4] = 1
expected_sol[3, 3] = 10./9
expected_sol[3, 4] = 1
expected_sol[3, 5] = - 1./9
expected_sol[4, 3] = - 11
expected_sol[4, 5] = - 7
expected_sol[5, 3] = 91./9
expected_sol[5, 4] = 9
expected_sol[5, 5] = - 10./9
sol = lamceq_jac(time, vel, mass, inertia)
assert(np.allclose(expected_sol, sol))
def test2_jac_linear_and_angular_momentum_eqs():
time = 0
vel = np.array([1, 0, 1, 0, 1, 0], dtype=float)
mass = 10
inertia = np.array([[100, 0, -10],
[ 0, 100, 0],
[-10, 0, 100]], dtype=float)
expected_sol = np.zeros([6, 6], dtype=float)
expected_sol[0, 2] = - 1
expected_sol[0, 4] = - 1
expected_sol[1, 3] = 1
expected_sol[1, 5] = - 1
expected_sol[2, 0] = 1
expected_sol[2, 4] = 1
expected_sol[3, 3] = 10./99
expected_sol[3, 5] = - 1./99
expected_sol[5, 3] = 1./99
expected_sol[5, 5] = - 10./99
sol = lamceq_jac(time, vel, mass, inertia)
assert(np.allclose(expected_sol, sol))
def test1_kinematic_angular_eqs():
time = 0
euler_angles = np.array([np.pi / 4, np.pi / 4, 0])
ang_vel = np.array([1, 1, 1], dtype=float)
expected_sol = np.array([0, 1 + 2 ** 0.5, 2])
sol = kaeq(time, euler_angles, ang_vel)
assert(np.allclose(expected_sol, sol))
def test2_kinematic_angular_eqs():
time = 0
euler_angles = np.array([0, np.pi / 2, 0])
ang_vel = np.array([0, 1, 0], dtype=float)
expected_sol = np.array([0, 0, 1], dtype=float)
sol = kaeq(time, euler_angles, ang_vel)
assert(np.allclose(expected_sol, sol))
def test1_jac_kinematic_angular_eqs():
time = 0
euler_angles = np.array([np.pi / 4, np.pi / 4, 0])
ang_vel = np.array([1, 1, 1], dtype=float)
expected_sol = np.zeros([3, 3])
expected_sol[0, 1] = - 2 ** 0.5
expected_sol[1, 0] = 2 * 2 ** 0.5
expected_sol[2, 0] = 2
sol = kaeq_jac(time, euler_angles, ang_vel)
assert(np.allclose(expected_sol, sol))
def test2_jac_kinematic_angular_eqs():
time = 0
euler_angles = np.array([0, np.pi / 2, 0])
ang_vel = np.array([0, 1, 0], dtype=float)
expected_sol = np.zeros([3, 3], dtype=float)
expected_sol[0, 1] = - 1
expected_sol[1, 0] = 1
sol = kaeq_jac(time, euler_angles, ang_vel)
assert(np.allclose(expected_sol, sol))
def test1_navigation_eqs():
time = 0
lin_vel = np.array([1, 1, 1], dtype=float)
euler_angles = np.array([np.pi / 4, np.pi / 4, 0])
expected_sol = np.array([1 + (2 ** 0.5) / 2, 0, 1 - (2 ** 0.5) / 2])
sol = kleq(time, lin_vel, euler_angles)
assert(np.allclose(expected_sol, sol))
def test2_navigation_eqs():
time = 0
lin_vel = np.array([1, 0, 1], dtype=float)
euler_angles = np.array([0, np.pi / 2, 0])
expected_sol = np.array([1, - 1, 0], dtype=float)
sol = kleq(time, lin_vel, euler_angles)
assert(np.allclose(expected_sol, sol))
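# These are plain pytest-style test functions; assuming pytest is installed,
# a typical invocation would be:
#
#     pytest src/pyfme/models/tests/test_euler_flat_earth.py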
| mit |
plaice/Zebrackets | src/zebrackets/zebraFont.py | 1 | 8776 | #!/usr/bin/python3
# File zebraFont.py
#
# Copyright (c) Blanca Mancilla, John Plaice, 2015, 2016
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''zebraFont.py TYPE STYLE STRIPES SIZE FAMILY MAG
creates a new MetaFont file and then invokes it.
'''
import argparse
import glob
import io
import math
import os
import re
import subprocess
import shutil
import sys
import zebraFontFiles
import zebraHelp
class Parameters:
def __init__(self, kind, style, slots, family,
size, mag, texmfHome, checkArgs):
self.kind = zebraHelp.validate_kind(kind)
self.style = zebraHelp.validate_style(style)
self.slots = zebraHelp.validate_slots(slots)
self.slotsAsLetter = chr(ord('a') + self.slots)
self.family = zebraHelp.validate_family(family)
self.size = zebraHelp.validate_size(size)
zebraHelp.validate_family_size(family, size)
self.mag = zebraHelp.validate_mag(mag)
self.texmfHome = zebraHelp.validate_texmfhome(texmfHome)
self.checkArgs = checkArgs
def callAndLog(args, log):
try:
proc = subprocess.Popen(
args, stdout=subprocess.PIPE, universal_newlines=True)
output = proc.stdout.read()
if output != '':
log.append(output)
except subprocess.CalledProcessError:
raise zebraHelp.CompError('System died when calling {0}'.format(*args))
def createMFcontent(kind, style, slots, sourceFont):
'''This method creates the font file's header, returning it as a string.
'''
styledict = { 'b' : '0', 'f' : '1', 'h' : '2' }
textFormat = '''% Copied from rtest on p.311 of the MetaFont book.
if unknown cmbase: input cmbase fi
mode_setup;
def generate suffix t = enddef;
input {0}; font_setup;
let iff = always_iff;
slots:={1};
foreground:={2};
input zeroman{3};'''
text = textFormat.format(
sourceFont, slots,
styledict[style], kind)
return text
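# For example, createMFcontent('b', 'f', 4, 'cmr10') would produce a header
# ending with:
#     slots:=4;
#     foreground:=1;
#     input zeromanb;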
def checkAndCreateFont(fileName, destMFdir, fileContent, texmfHome, log):
# If the font file does not already exist in the TeX tree, create it,
# write the given content into it, and refresh the ls-R database.
fileNameMF = '{0}.mf'.format(fileName)
try:
subprocess.check_output(['kpsewhich', fileNameMF])
except subprocess.CalledProcessError:
destMFpath = '{0}/{1}.mf'.format(destMFdir, fileName)
with open(destMFpath, 'w') as fileMF:
fileMF.write(fileContent)
callAndLog(['mktexlsr', texmfHome], log)
def createMFfiles(params):
# Set up of diretories and files names
sourceFont = '{0}{1}'.format(params.family, int(params.size))
destMFdir = '{0}/fonts/source/public/zbtex'.format(params.texmfHome)
destTFMdir = '{0}/fonts/tfm/public/zbtex'.format(params.texmfHome)
destPKdir = '{0}/fonts/pk/ljfour/public/zbtex'.format(params.texmfHome)
destMF = 'z{0}{1}{2}{3}'.format(
params.kind, params.style,
params.slotsAsLetter, sourceFont)
destMFpath = '{0}/{1}.mf'.format(destMFdir, destMF)
textMFfile = createMFcontent(
params.kind, params.style,
params.slots, sourceFont)
# Check that the master font exists in the TeX ecosystem.
try:
subprocess.check_output(['kpsewhich', '{0}.mf'.format(sourceFont)])
except subprocess.CalledProcessError:
raise zebraHelp.CompError('File "{0}.mf" does not exist'.format(destMF))
# Create the directory where font files will be stored for this run.
try:
os.makedirs(destMFdir)
except FileExistsError:
pass
zbtexFontsLog = []
# Font existence checks and creation are handled by checkAndCreateFont
# above; each file is only written if it is not already in the TeX tree.
checkAndCreateFont(
destMF, destMFdir, textMFfile, params.texmfHome, zbtexFontsLog)
checkAndCreateFont(
'zepunctb', destMFdir, zebraFontFiles.str_zepunctb,
params.texmfHome, zbtexFontsLog)
checkAndCreateFont(
'zepunctp', destMFdir, zebraFontFiles.str_zepunctp,
params.texmfHome, zbtexFontsLog)
checkAndCreateFont(
'zeromanb', destMFdir, zebraFontFiles.str_zeromanb,
params.texmfHome, zbtexFontsLog)
checkAndCreateFont(
'zeromanp', destMFdir, zebraFontFiles.str_zeromanp,
params.texmfHome, zbtexFontsLog)
# Check that the main font's TFM exists; if not, generate and install it,
# then regenerate the ls-R database used by the kpathsea library.
try:
subprocess.check_output(['kpsewhich', '{0}.tfm'.format(destMF)])
except subprocess.CalledProcessError:
callAndLog(['mktextfm', destMF], zbtexFontsLog)
callAndLog(
['mktexlsr', params.texmfHome], zbtexFontsLog)
if int(params.mag) != 1:
dpi = params.mag * 600
try:
subprocess.check_output(
['kpsewhich', '{0}.{1}pk'.format(destMF, dpi)])
except subprocess.CalledProcessError:
try:
proc = subprocess.Popen(
['kpsewhich', '{0}.600pk'.format(destMF)],
stdout=subprocess.PIPE, universal_newlines=True)
except subprocess.CalledProcessError:
raise zebraHelp.CompError('Could not find file {0}.600pk'.
format(destMF))
dpidir = re.sub('/[^/]*$', '', proc.stdout.read())
callAndLog(['mf-nowin',
'-progname=mf',
'\\mode:=ljfour; mag:={0}; nonstopmode; input {1}'.
format(math.sqrt(float(params.mag)), destMF)],
zbtexFontsLog)
callAndLog(['gftopk',
'{0}.{1}gf'.format(destMF, dpi),
'{0}.{1}pk'.format(destMF, dpi)],
zbtexFontsLog)
shutil.move('{0}.{1}pk'.format(destMF, dpi), dpidir)
callAndLog(['mktexlsr', params.texmfHome], zbtexFontsLog)
for file in glob.glob('{0}.*'.format(destMF)):
os.unlink(file)
with open('zbtexfonts.log', 'a') as zbtexLogFile:
for string in zbtexFontsLog:
zbtexLogFile.write(string)
def zebraFont(kind, style, slots, family,
size, mag, texmfHome, checkArgs):
try:
parameters = Parameters(kind, style, slots, family,
size, mag, texmfHome, checkArgs)
if checkArgs is False:
createMFfiles(parameters)
return zebraHelp.Result(True, "")
except zebraHelp.ArgError as e:
return zebraHelp.Result(False, "zebraFont ArgError: " + e)
except zebraHelp.CompError as e:
return zebraHelp.Result(False, "zebraFont CompError: " + e)
def zebraFontParser(inputArguments = sys.argv[1:]):
parser = argparse.ArgumentParser(
description='Build a zebrackets font.',
epilog="This module is part of the zebrackets package.")
parser.add_argument('--kind', type=str, choices=zebraHelp.validKinds,
required=True, help='b = bracket, p = parenthesis')
parser.add_argument('--style', type=str, choices=zebraHelp.validStyles,
required=True, help='b = background, f = foreground, h = hybrid')
parser.add_argument('--slots', type=int,
required=True, choices=zebraHelp.validSlots,
help='number of slots in brackets')
parser.add_argument('--family', type=str,
choices=zebraHelp.validFontFamilies,
required=True, help='font family')
parser.add_argument('--size', type=int,
choices=zebraHelp.validFontSizes,
required=True, help='font size')
parser.add_argument('--mag', type=int,
default=1, help='magnification')
parser.add_argument('--texmfhome', type=str,
help='substitute for variable TEXMFHOME')
parser.add_argument('--checkargs', action='store_true',
help='check validity of input arguments')
args = parser.parse_args(inputArguments)
return zebraFont(args.kind, args.style, args.slots, args.family,
args.size, args.mag, args.texmfhome, args.checkargs)
if __name__ == '__main__':
zebraFontParser()
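# Example command line (argument values are illustrative; the valid choices
# are defined in zebraHelp):
#
#     python3 zebraFont.py --kind b --style f --slots 4 \
#         --family cmr --size 10 --mag 1 --texmfhome ~/texmf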
| gpl-3.0 |
Tallefer/karaka | karaka/api/api.py | 4 | 6215 | #
# Karaka Skype-XMPP Gateway: Customer API
# <http://www.vipadia.com/products/karaka.html>
#
# Copyright (C) 2008-2009 Vipadia Limited
# Richard Mortier <[email protected]>
# Neil Stratford <[email protected]>
#
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License version
## 2 as published by the Free Software Foundation.
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License version 2 for more details.
## You should have received a copy of the GNU General Public License
## version 2 along with this program; if not, write to the Free
## Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
## MA 02110-1301, USA.
import time
import MySQLdb
##
## Copied from common.py
## DO NOT EDIT
##
import syslog
def _log(level, mesg):
if "\n" in mesg: mesgs = mesg.split("\n")
else: mesgs = [mesg]
pfx = ""
for mesg in mesgs:
while len(mesg) > 254:
syslog.syslog(level, "%s%s" % (pfx, mesg[:254].encode("utf-8"),))
mesg = "%s" % mesg[254:]
pfx = "||"
syslog.syslog(level, "%s%s" % (pfx, mesg.encode("utf-8")))
pfx = "|"
def _dbg(s): _log(syslog.LOG_DEBUG, s)
##
## End
##
from apiconfig import APIConfig
## # Crypto - KeyCzar
## from keyczar import keyczar
## PRIVATE_KEYLOC="/etc/karaka/keys/private/"
## PUBLIC_KEYLOC="/etc/karaka/keys/public/"
# Debug
Debug = 6
def dbg(s, l=0):
if Debug > l: _dbg(s)
##
## Database API
## Invoked by MASTER to persist registrations and CDRs
##-----------------------------------------------------
class DatabaseAPI:
def __init__(self):
self.config = APIConfig()
self.conn = MySQLdb.connect(
self.config.sql_server, self.config.sql_user, self.config.sql_password,
self.config.sql_database)
self.conn.autocommit(True)
def _invoke(self, cmd, args=None):
dbg("_invoke: cmd:%s args:%s" % (cmd, args), 5)
cursor = self.conn.cursor()
nrows = cursor.execute(cmd, args)
rows = cursor.fetchall()
cursor.close()
dbg(" nrows:%s rows:%s" % (nrows, rows,), 5)
return (nrows, rows)
## set_credentials_plain(user-jid, skype-handle, skype-secret) -> (bool, reason)
## insert credentials into the database for this user
def set_credentials_plain(self, userjid, skypeuser, skypesecret):
# Encrypt before writing to DB
dbg("set_credentials: userjid:%s skypeuser:%s skypesecret:*" % (
userjid, skypeuser), 4)
## KeyCzar
## crypter = keyczar.Encrypter.Read(PUBLIC_KEYLOC)
## skypesecret = crypter.Encrypt(str(skypesecret))
dbg(" encrypt(skypesecret):%s" % (skypesecret,), 4)
cmd = "INSERT INTO registrations (userjid, user, secret) VALUES (%s, %s, %s)"
args = (userjid, skypeuser, skypesecret,)
(cnt, res) = self._invoke(cmd, args)
dbg(" cnt:%d res:%s" % (cnt, res), 4)
return (True, "Success")
## remove_credentials(user-jid)
## delete credentials from the database for this user
def remove_credentials(self, userjid):
dbg("remove_credentials: userjid:%s" % (userjid,), 4)
cmd = "DELETE FROM registrations WHERE userjid=%s"
args = (userjid,)
(cnt, res) = self._invoke(cmd, args)
dbg(" cnt:%d res:%s" % (cnt, res), 4)
## get_credentials_crypt(user-jid) -> (skype-user, encrypted-skype-password)
## retrieve credentials (encrypted password) for this user
def get_credentials_crypt(self, userjid):
dbg("get_credentials: userjid:%s" % (userjid,), 4)
cmd = "SELECT user, secret FROM registrations WHERE userjid=%s"
args = (userjid,)
(cnt, res) = self._invoke(cmd, args)
dbg(" cnt:%d res:%s" % (cnt, res), 4)
if not res: return res
return (res[0][0], res[0][1])
## get_marketing_message(user-jid)
## retrieve mood message prefix
def get_marketing_message(self, userjid):
dbg("get_marketing_message: userjid:%s" % (userjid,), 4)
return self.config.marketing_message
## log_start(user-jid, skype-user)
## record the start event for a user signing in
def log_start(self, userjid, skypehandle):
dbg("log_start: user:%s skypehandle:%s" % (userjid, skypehandle), 4)
now = time.time()
cmd = "INSERT INTO log (userjid, skypehandle, at, event, message) " \
+ " VALUES (%s,%s,%s,%s,%s)"
args = (userjid, skypehandle, now, "start", "")
self._invoke(cmd, args)
## log_stop(user-jid, skype-user)
## record the stop event for a user signing out
def log_stop(self, userjid, skypehandle):
dbg("log_stop: user:%s skypehandle:%s" % (userjid, skypehandle), 4)
now = time.time()
cmd = "INSERT INTO log (userjid, skypehandle, at, event, message) " \
+ " VALUES (%s,%s,%s,%s,%s)"
args = (userjid, skypehandle, now, "stop", "")
self._invoke(cmd, args)
## log_error(user-jid, skype-user, errormsg)
## record an error event
def log_error(self, userjid, skypehandle, errormsg):
dbg("log_error: user:%s skypehandle:%s errormsg:%s" % (userjid, skypehandle, errormsg), 4)
now = time.time()
cmd = "INSERT INTO log (userjid, skypehandle, at, event, message) " \
+ " VALUES (%s,%s,%s,%s,%s)"
args = (userjid, skypehandle, now, "error", errormsg)
self._invoke(cmd, args)
#
# Cryptography API
# Invoked by individual BUNDLE to decode credentials
#----------------------------------------------------
class CryptoAPI:
def __init__(self): pass
## decrypt(encrypted-skype-password) -> skype-password
## decrypt the given input password
def decrypt(self, inputtext):
## KeyCzar
## crypter = keyczar.Crypter.Read(PRIVATE_KEYLOC)
## return crypter.Decrypt(inputtext)
return inputtext
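# Illustrative sketch (not part of the original module; assumes a reachable
# MySQL server and a valid apiconfig). Shows the intended call flow through
# the two APIs above; the JID and credentials are placeholders.
def _example_usage():
    db = DatabaseAPI()
    db.set_credentials_plain("[email protected]", "skypeuser", "secret")
    creds = db.get_credentials_crypt("[email protected]")
    if creds:
        user, secret = creds
        dbg("user:%s secret:%s" % (user, CryptoAPI().decrypt(secret)))
    db.remove_credentials("[email protected]")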
| gpl-2.0 |
zhenwendai/RGP | gpnarx.py | 1 | 1997 | from __future__ import print_function
import GPy
import numpy as np
def transformTimeSeriesToSeq(Y, timeWindow):
Ntr,D = Y.shape
blocksNumber = Ntr - timeWindow
X = np.zeros((blocksNumber, timeWindow*D))
Ynew = np.zeros((blocksNumber,D))
for i in range(blocksNumber):
tmp = Y[i:i+timeWindow,:].T
X[i,:] = tmp.flatten().T
Ynew[i,:] = Y[i+timeWindow,:]
return X, Ynew
def transformSeqToTimeSeries(X, Y, timeWindow):
assert(X.shape[0] == Y.shape[0])
N = X.shape[0] + timeWindow
D = X.shape[1] // timeWindow  # integer division: D is used as an array dimension below
Ynew = np.zeros((N, D))
for i in range(X.shape[0]):
Ynew[i:i+timeWindow, :] = X[i,:].reshape(D, timeWindow).T
Ynew[-1,:] = Y[-1,:]
return Ynew
def test_transformSeries(Y, timeWindow):
(xx,yy) = transformTimeSeriesToSeq(Y, timeWindow)
return transformSeqToTimeSeries(xx,yy,timeWindow)
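# Illustrative sketch (not part of the original module): round-tripping a toy
# series through the two transforms above. Shapes follow the code: Y is
# (Ntr, D), X becomes (Ntr - timeWindow, timeWindow * D).
def _example_roundtrip():
    Y = np.arange(10.0).reshape(5, 2)          # 5 time steps, 2 output dims
    X, Ynew = transformTimeSeriesToSeq(Y, 2)   # X: (3, 4), Ynew: (3, 2)
    Yrec = transformSeqToTimeSeries(X, Ynew, 2)
    assert np.allclose(Yrec, Y)
    return Yrec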
def gp_narx(m, x_start, N, Uts, ws, Ydebug=None):
D = m.output_dim
Q = x_start.shape[1]
Y = np.empty((N,D,))
Y[:] = np.NAN
varY = Y.copy()
assert(Q%ws==0)
assert(D == Q/ws)
Xnew = m.X.copy()
Ynew = m.Y.copy()
curX = x_start
varYpred = None
for i in range(N):
# Make sure the added x_add is a matrix (1,Q) and not (Q,)
if len(curX.shape) < 2:
curX = curX.reshape(1,curX.shape[0])
varYpred_prev = varYpred
#Ypred, varYpred = m._raw_predict(np.hstack((curX,curU)))
#curU = Uts[i,:]
#Ypred, varYpred = m._raw_predict(np.hstack((curX,Uts[i,:][None,:])))
Ypred, varYpred = m.predict(np.hstack((curX,Uts[i,:][None,:])))
Y[i,:] = Ypred
varY[i,:] = varYpred
#print i, ': ', Y[i,:] , ' | var: ', varYpred #####
if Ydebug is not None:
print(i, ': X=', str(curX.flatten()), 'U=', str(Uts[i,:].flatten()), 'Y=', str(Ydebug[i,:]))
if i == N-1:
break
curX = np.hstack((curX[0,D:], Ypred[0,:]))
return Y, varY | bsd-3-clause |
maartenq/ansible | lib/ansible/modules/network/netscaler/netscaler_service.py | 67 | 31451 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_service
short_description: Manage service configuration in Netscaler
description:
- Manage service configuration in Netscaler.
- This module allows the creation, deletion and modification of Netscaler services.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
- This module supports check mode.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
name:
description:
- >-
Name for the service. Must begin with an ASCII alphabetic or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters. Cannot be changed after the service has been created.
- "Minimum length = 1"
ip:
description:
- "IP to assign to the service."
- "Minimum length = 1"
servername:
description:
- "Name of the server that hosts the service."
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'DTLS'
- 'NNTP'
- 'RPCSVR'
- 'DNS'
- 'ADNS'
- 'SNMP'
- 'RTSP'
- 'DHCPRA'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'DNS_TCP'
- 'ADNS_TCP'
- 'MYSQL'
- 'MSSQL'
- 'ORACLE'
- 'RADIUS'
- 'RADIUSListener'
- 'RDP'
- 'DIAMETER'
- 'SSL_DIAMETER'
- 'TFTP'
- 'SMPP'
- 'PPTP'
- 'GRE'
- 'SYSLOGTCP'
- 'SYSLOGUDP'
- 'FIX'
- 'SSL_FIX'
description:
- "Protocol in which data is exchanged with the service."
port:
description:
- "Port number of the service."
- "Range 1 - 65535"
- "* in CLI is represented as 65535 in NITRO API"
cleartextport:
description:
- >-
Port to which clear text data must be sent after the appliance decrypts incoming SSL traffic.
Applicable to transparent SSL services.
- "Minimum value = 1"
cachetype:
choices:
- 'TRANSPARENT'
- 'REVERSE'
- 'FORWARD'
description:
- "Cache type supported by the cache server."
maxclient:
description:
- "Maximum number of simultaneous open connections to the service."
- "Minimum value = 0"
- "Maximum value = 4294967294"
healthmonitor:
description:
- "Monitor the health of this service"
default: yes
maxreq:
description:
- "Maximum number of requests that can be sent on a persistent connection to the service."
- "Note: Connection requests beyond this value are rejected."
- "Minimum value = 0"
- "Maximum value = 65535"
cacheable:
description:
- "Use the transparent cache redirection virtual server to forward requests to the cache server."
- "Note: Do not specify this parameter if you set the Cache Type parameter."
default: no
cip:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Before forwarding a request to the service, insert an HTTP header with the client's IPv4 or IPv6
address as its value. Used if the server needs the client's IP address for security, accounting, or
other purposes, and setting the Use Source IP parameter is not a viable option.
cipheader:
description:
- >-
Name for the HTTP header whose value must be set to the IP address of the client. Used with the
Client IP parameter. If you set the Client IP parameter, and you do not specify a name for the
header, the appliance uses the header name specified for the global Client IP Header parameter (the
cipHeader parameter in the set ns param CLI command or the Client IP Header parameter in the
Configure HTTP Parameters dialog box at System > Settings > Change HTTP parameters). If the global
Client IP Header parameter is not specified, the appliance inserts a header with the name
"client-ip.".
- "Minimum length = 1"
usip:
description:
- >-
Use the client's IP address as the source IP address when initiating a connection to the server. When
creating a service, if you do not set this parameter, the service inherits the global Use Source IP
setting (available in the enable ns mode and disable ns mode CLI commands, or in the System >
Settings > Configure modes > Configure Modes dialog box). However, you can override this setting
after you create the service.
pathmonitor:
description:
- "Path monitoring for clustering."
pathmonitorindv:
description:
- "Individual Path monitoring decisions."
useproxyport:
description:
- >-
Use the proxy port as the source port when initiating connections with the server. With the NO
setting, the client-side connection port is used as the source port for the server-side connection.
- "Note: This parameter is available only when the Use Source IP (USIP) parameter is set to YES."
sp:
description:
- "Enable surge protection for the service."
rtspsessionidremap:
description:
- "Enable RTSP session ID mapping for the service."
default: off
clttimeout:
description:
- "Time, in seconds, after which to terminate an idle client connection."
- "Minimum value = 0"
- "Maximum value = 31536000"
svrtimeout:
description:
- "Time, in seconds, after which to terminate an idle server connection."
- "Minimum value = 0"
- "Maximum value = 31536000"
customserverid:
description:
- >-
Unique identifier for the service. Used when the persistency type for the virtual server is set to
Custom Server ID.
default: 'None'
serverid:
description:
- "The identifier for the service. This is used when the persistency type is set to Custom Server ID."
cka:
description:
- "Enable client keep-alive for the service."
tcpb:
description:
- "Enable TCP buffering for the service."
cmp:
description:
- "Enable compression for the service."
maxbandwidth:
description:
- "Maximum bandwidth, in Kbps, allocated to the service."
- "Minimum value = 0"
- "Maximum value = 4294967287"
accessdown:
description:
- >-
Use Layer 2 mode to bridge the packets sent to this service if it is marked as DOWN. If the service
is DOWN, and this parameter is disabled, the packets are dropped.
default: no
monthreshold:
description:
- >-
Minimum sum of weights of the monitors that are bound to this service. Used to determine whether to
mark a service as UP or DOWN.
- "Minimum value = 0"
- "Maximum value = 65535"
downstateflush:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Flush all active transactions associated with a service whose state transitions from UP to DOWN. Do
not enable this option for applications that must complete their transactions.
tcpprofilename:
description:
- "Name of the TCP profile that contains TCP configuration settings for the service."
- "Minimum length = 1"
- "Maximum length = 127"
httpprofilename:
description:
- "Name of the HTTP profile that contains HTTP configuration settings for the service."
- "Minimum length = 1"
- "Maximum length = 127"
hashid:
description:
- >-
A numerical identifier that can be used by hash based load balancing methods. Must be unique for each
service.
- "Minimum value = 1"
comment:
description:
- "Any information about the service."
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable logging of AppFlow information."
netprofile:
description:
- "Network profile to use for the service."
- "Minimum length = 1"
- "Maximum length = 127"
td:
description:
- >-
Integer value that uniquely identifies the traffic domain in which you want to configure the entity.
If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID
of 0.
- "Minimum value = 0"
- "Maximum value = 4094"
processlocal:
choices:
- 'enabled'
- 'disabled'
description:
- >-
By turning on this option, packets destined to a service in a cluster will not undergo any steering.
Turn this option on for single packet request response mode or when the upstream device is performing
a proper RSS for connection based distribution.
dnsprofilename:
description:
- >-
Name of the DNS profile to be associated with the service. DNS profile properties will applied to the
transactions processed by a service. This parameter is valid only for ADNS and ADNS-TCP services.
- "Minimum length = 1"
- "Maximum length = 127"
ipaddress:
description:
- "The new IP address of the service."
graceful:
description:
- >-
Shut down gracefully, not accepting any new connections, and disabling the service when all of its
connections are closed.
default: no
monitor_bindings:
description:
- A list of load balancing monitors to bind to this service.
- Each monitor entry is a dictionary which may contain the following options.
- Note that if not using the built in monitors they must first be setup.
suboptions:
monitorname:
description:
- Name of the monitor.
weight:
description:
- Weight to assign to the binding between the monitor and service.
dup_state:
choices:
- 'enabled'
- 'disabled'
description:
- State of the monitor.
- The state setting for a monitor of a given type affects all monitors of that type.
- For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled.
- If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
dup_weight:
description:
- Weight to assign to the binding between the monitor and service.
disabled:
description:
- When set to C(yes) the service state will be set to DISABLED.
- When set to C(no) the service state will be set to ENABLED.
- >-
Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
does not cause the module result to report a changed status.
type: bool
default: false
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
# Monitor monitor-1 must have been already setup
- name: Setup http service
gather_facts: False
delegate_to: localhost
netscaler_service:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
state: present
name: service-http-1
servicetype: HTTP
ipaddress: 10.78.0.1
port: 80
monitor_bindings:
- monitor-1
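# Illustrative variant (not from the original examples): monitor bindings may
# also be given in dictionary form, using the suboptions documented above.
- name: Setup http service with weighted monitor binding
  gather_facts: False
  delegate_to: localhost
  netscaler_service:
    nsip: 172.18.0.2
    nitro_user: nsroot
    nitro_pass: nsroot
    state: present
    name: service-http-2
    servicetype: HTTP
    ipaddress: 10.78.0.2
    port: 80
    monitor_bindings:
      - monitorname: monitor-1
        weight: 50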
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
diff:
description: A dictionary with a list of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: "{ 'clttimeout': 'difference. ours: (float) 10.0 other: (float) 20.0' }"
'''
import copy
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service import service
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding import service_lbmonitor_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding import lbmonitor_service_binding
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines,
get_immutables_intersection)
def service_exists(client, module):
if service.count_filtered(client, 'name:%s' % module.params['name']) > 0:
return True
else:
return False
def service_identical(client, module, service_proxy):
service_list = service.get_filtered(client, 'name:%s' % module.params['name'])
diff_dict = service_proxy.diff_object(service_list[0])
# the actual ip address is stored in the ipaddress attribute
# of the retrieved object
if 'ip' in diff_dict:
del diff_dict['ip']
if len(diff_dict) == 0:
return True
else:
return False
def diff(client, module, service_proxy):
service_list = service.get_filtered(client, 'name:%s' % module.params['name'])
diff_object = service_proxy.diff_object(service_list[0])
if 'ip' in diff_object:
del diff_object['ip']
return diff_object
def get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs):
bindings = {}
if module.params['monitor_bindings'] is not None:
for binding in module.params['monitor_bindings']:
attribute_values_dict = copy.deepcopy(binding)
# attribute_values_dict['servicename'] = module.params['name']
attribute_values_dict['servicegroupname'] = module.params['name']
binding_proxy = ConfigProxy(
actual=lbmonitor_service_binding(),
client=client,
attribute_values_dict=attribute_values_dict,
readwrite_attrs=monitor_bindings_rw_attrs,
)
key = binding_proxy.monitorname
bindings[key] = binding_proxy
return bindings
def get_actual_monitor_bindings(client, module):
bindings = {}
if service_lbmonitor_binding.count(client, module.params['name']) == 0:
return bindings
# Fallthrough to rest of execution
for binding in service_lbmonitor_binding.get(client, module.params['name']):
# Excluding default monitors since we cannot operate on them
if binding.monitor_name in ('tcp-default', 'ping-default'):
continue
key = binding.monitor_name
actual = lbmonitor_service_binding()
actual.weight = binding.weight
actual.monitorname = binding.monitor_name
actual.dup_weight = binding.dup_weight
actual.servicename = module.params['name']
bindings[key] = actual
return bindings
def monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
configured_proxys = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs)
actual_bindings = get_actual_monitor_bindings(client, module)
configured_key_set = set(configured_proxys.keys())
actual_key_set = set(actual_bindings.keys())
symmetrical_diff = configured_key_set ^ actual_key_set
if len(symmetrical_diff) > 0:
return False
# Compare key to key
for monitor_name in configured_key_set:
proxy = configured_proxys[monitor_name]
actual = actual_bindings[monitor_name]
diff_dict = proxy.diff_object(actual)
if 'servicegroupname' in diff_dict:
if proxy.servicegroupname == actual.servicename:
del diff_dict['servicegroupname']
if len(diff_dict) > 0:
return False
# Fallthrough to success
return True
def sync_monitor_bindings(client, module, monitor_bindings_rw_attrs):
configured_proxys = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs)
actual_bindings = get_actual_monitor_bindings(client, module)
configured_keyset = set(configured_proxys.keys())
actual_keyset = set(actual_bindings.keys())
# Delete extra
delete_keys = list(actual_keyset - configured_keyset)
for monitor_name in delete_keys:
log('Deleting binding for monitor %s' % monitor_name)
lbmonitor_service_binding.delete(client, actual_bindings[monitor_name])
# Delete and re-add modified
common_keyset = list(configured_keyset & actual_keyset)
for monitor_name in common_keyset:
proxy = configured_proxys[monitor_name]
actual = actual_bindings[monitor_name]
if not proxy.has_equal_attributes(actual):
log('Deleting and re adding binding for monitor %s' % monitor_name)
lbmonitor_service_binding.delete(client, actual)
proxy.add()
# Add new
new_keys = list(configured_keyset - actual_keyset)
for monitor_name in new_keys:
log('Adding binding for monitor %s' % monitor_name)
configured_proxys[monitor_name].add()
def all_identical(client, module, service_proxy, monitor_bindings_rw_attrs):
return service_identical(client, module, service_proxy) and monitor_bindings_identical(client, module, monitor_bindings_rw_attrs)
def do_state_change(client, module, service_proxy):
if module.params['disabled']:
log('Disabling service')
result = service.disable(client, service_proxy.actual)
else:
log('Enabling service')
result = service.enable(client, service_proxy.actual)
return result
def main():
module_specific_arguments = dict(
name=dict(type='str'),
ip=dict(type='str'),
servername=dict(type='str'),
servicetype=dict(
type='str',
choices=[
'HTTP',
'FTP',
'TCP',
'UDP',
'SSL',
'SSL_BRIDGE',
'SSL_TCP',
'DTLS',
'NNTP',
'RPCSVR',
'DNS',
'ADNS',
'SNMP',
'RTSP',
'DHCPRA',
'ANY',
'SIP_UDP',
'SIP_TCP',
'SIP_SSL',
'DNS_TCP',
'ADNS_TCP',
'MYSQL',
'MSSQL',
'ORACLE',
'RADIUS',
'RADIUSListener',
'RDP',
'DIAMETER',
'SSL_DIAMETER',
'TFTP',
'SMPP',
'PPTP',
'GRE',
'SYSLOGTCP',
'SYSLOGUDP',
'FIX',
'SSL_FIX'
]
),
port=dict(type='int'),
cleartextport=dict(type='int'),
cachetype=dict(
type='str',
choices=[
'TRANSPARENT',
'REVERSE',
'FORWARD',
]
),
maxclient=dict(type='float'),
healthmonitor=dict(
type='bool',
default=True,
),
maxreq=dict(type='float'),
cacheable=dict(
type='bool',
default=False,
),
cip=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
cipheader=dict(type='str'),
usip=dict(type='bool'),
useproxyport=dict(type='bool'),
sp=dict(type='bool'),
rtspsessionidremap=dict(
type='bool',
default=False,
),
clttimeout=dict(type='float'),
svrtimeout=dict(type='float'),
customserverid=dict(
type='str',
default='None',
),
cka=dict(type='bool'),
tcpb=dict(type='bool'),
cmp=dict(type='bool'),
maxbandwidth=dict(type='float'),
accessdown=dict(
type='bool',
default=False
),
monthreshold=dict(type='float'),
downstateflush=dict(
type='str',
choices=[
'enabled',
'disabled',
],
),
tcpprofilename=dict(type='str'),
httpprofilename=dict(type='str'),
hashid=dict(type='float'),
comment=dict(type='str'),
appflowlog=dict(
type='str',
choices=[
'enabled',
'disabled',
],
),
netprofile=dict(type='str'),
processlocal=dict(
type='str',
choices=[
'enabled',
'disabled',
],
),
dnsprofilename=dict(type='str'),
ipaddress=dict(type='str'),
graceful=dict(
type='bool',
default=False,
),
)
hand_inserted_arguments = dict(
monitor_bindings=dict(type='list'),
disabled=dict(
type='bool',
default=False,
),
)
argument_spec = dict()
argument_spec.update(netscaler_common_arguments)
argument_spec.update(module_specific_arguments)
argument_spec.update(hand_inserted_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk')
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
# Fallthrough to rest of execution
# Instantiate Service Config object
readwrite_attrs = [
'name',
'ip',
'servername',
'servicetype',
'port',
'cleartextport',
'cachetype',
'maxclient',
'healthmonitor',
'maxreq',
'cacheable',
'cip',
'cipheader',
'usip',
'useproxyport',
'sp',
'rtspsessionidremap',
'clttimeout',
'svrtimeout',
'customserverid',
'cka',
'tcpb',
'cmp',
'maxbandwidth',
'accessdown',
'monthreshold',
'downstateflush',
'tcpprofilename',
'httpprofilename',
'hashid',
'comment',
'appflowlog',
'netprofile',
'processlocal',
'dnsprofilename',
'ipaddress',
'graceful',
]
readonly_attrs = [
'numofconnections',
'policyname',
'serviceconftype',
'serviceconftype2',
'value',
'gslb',
'dup_state',
'publicip',
'publicport',
'svrstate',
'monitor_state',
'monstatcode',
'lastresponse',
'responsetime',
'riseapbrstatsmsgcode2',
'monstatparam1',
'monstatparam2',
'monstatparam3',
'statechangetimesec',
'statechangetimemsec',
'tickssincelaststatechange',
'stateupdatereason',
'clmonowner',
'clmonview',
'serviceipstr',
'oracleserverversion',
]
immutable_attrs = [
'name',
'ip',
'servername',
'servicetype',
'port',
'cleartextport',
'cachetype',
'cipheader',
'serverid',
'state',
'td',
'monitor_name_svc',
'riseapbrstatsmsgcode',
'graceful',
'all',
'Internal',
'newname',
]
transforms = {
'pathmonitorindv': ['bool_yes_no'],
'cacheable': ['bool_yes_no'],
'cka': ['bool_yes_no'],
'pathmonitor': ['bool_yes_no'],
'tcpb': ['bool_yes_no'],
'sp': ['bool_on_off'],
'graceful': ['bool_yes_no'],
'usip': ['bool_yes_no'],
'healthmonitor': ['bool_yes_no'],
'useproxyport': ['bool_yes_no'],
'rtspsessionidremap': ['bool_on_off'],
'accessdown': ['bool_yes_no'],
'cmp': ['bool_yes_no'],
'cip': [lambda v: v.upper()],
'downstateflush': [lambda v: v.upper()],
'appflowlog': [lambda v: v.upper()],
'processlocal': [lambda v: v.upper()],
}
monitor_bindings_rw_attrs = [
'servicename',
'servicegroupname',
'dup_state',
'dup_weight',
'monitorname',
'weight',
]
# Translate module arguments to corresponding config object attributes
if module.params['ip'] is None:
module.params['ip'] = module.params['ipaddress']
service_proxy = ConfigProxy(
actual=service(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
)
try:
# Apply appropriate state
if module.params['state'] == 'present':
log('Applying actions for state present')
if not service_exists(client, module):
if not module.check_mode:
service_proxy.add()
sync_monitor_bindings(client, module, monitor_bindings_rw_attrs)
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not all_identical(client, module, service_proxy, monitor_bindings_rw_attrs):
# Check if we try to change value of immutable attributes
diff_dict = diff(client, module, service_proxy)
immutables_changed = get_immutables_intersection(service_proxy, diff_dict.keys())
if immutables_changed != []:
msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' % (immutables_changed,)
module.fail_json(msg=msg, diff=diff_dict, **module_result)
# Service sync
if not service_identical(client, module, service_proxy):
if not module.check_mode:
service_proxy.update()
# Monitor bindings sync
if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
if not module.check_mode:
sync_monitor_bindings(client, module, monitor_bindings_rw_attrs)
module_result['changed'] = True
if not module.check_mode:
if module.params['save_config']:
client.save_config()
else:
module_result['changed'] = False
if not module.check_mode:
res = do_state_change(client, module, service_proxy)
if res.errorcode != 0:
msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message)
module.fail_json(msg=msg, **module_result)
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state present')
if not service_exists(client, module):
module.fail_json(msg='Service does not exist', **module_result)
if not service_identical(client, module, service_proxy):
module.fail_json(msg='Service differs from configured', diff=diff(client, module, service_proxy), **module_result)
if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
module.fail_json(msg='Monitor bindings are not identical', **module_result)
elif module.params['state'] == 'absent':
log('Applying actions for state absent')
if service_exists(client, module):
if not module.check_mode:
service_proxy.delete()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state absent')
if service_exists(client, module):
module.fail_json(msg='Service still exists', **module_result)
except nitro_exception as e:
msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
| gpl-3.0 |
Curious72/sympy | sympy/core/tests/test_basic.py | 11 | 6009 | """This tests sympy/core/basic.py with (ideally) no reference to subclasses
of Basic or Atom."""
from sympy.core.basic import Basic, Atom, preorder_traversal
from sympy.core.singleton import S, Singleton
from sympy.core.symbol import symbols
from sympy.core.compatibility import default_sort_key, with_metaclass
from sympy import sin, Lambda, Q
from sympy.utilities.pytest import raises
b1 = Basic()
b2 = Basic(b1)
b3 = Basic(b2)
b21 = Basic(b2, b1)
def test_structure():
assert b21.args == (b2, b1)
assert b21.func(*b21.args) == b21
assert bool(b1)
def test_equality():
instances = [b1, b2, b3, b21, Basic(b1, b1, b1), Basic]
for i, b_i in enumerate(instances):
for j, b_j in enumerate(instances):
assert (b_i == b_j) == (i == j)
assert (b_i != b_j) == (i != j)
assert Basic() != []
assert not(Basic() == [])
assert Basic() != 0
assert not(Basic() == 0)
def test_matches_basic():
instances = [Basic(b1, b1, b2), Basic(b1, b2, b1), Basic(b2, b1, b1),
Basic(b1, b2), Basic(b2, b1), b2, b1]
for i, b_i in enumerate(instances):
for j, b_j in enumerate(instances):
if i == j:
assert b_i.matches(b_j) == {}
else:
assert b_i.matches(b_j) is None
assert b1.match(b1) == {}
def test_has():
assert b21.has(b1)
assert b21.has(b3, b1)
assert b21.has(Basic)
assert not b1.has(b21, b3)
assert not b21.has()
def test_subs():
assert b21.subs(b2, b1) == Basic(b1, b1)
assert b21.subs(b2, b21) == Basic(b21, b1)
assert b3.subs(b2, b1) == b2
assert b21.subs([(b2, b1), (b1, b2)]) == Basic(b2, b2)
assert b21.subs({b1: b2, b2: b1}) == Basic(b2, b2)
raises(ValueError, lambda: b21.subs('bad arg'))
raises(ValueError, lambda: b21.subs(b1, b2, b3))
def test_atoms():
assert b21.atoms() == set()
def test_free_symbols_empty():
assert b21.free_symbols == set()
def test_doit():
assert b21.doit() == b21
assert b21.doit(deep=False) == b21
def test_S():
assert repr(S) == 'S'
def test_xreplace():
assert b21.xreplace({b2: b1}) == Basic(b1, b1)
assert b21.xreplace({b2: b21}) == Basic(b21, b1)
assert b3.xreplace({b2: b1}) == b2
assert Basic(b1, b2).xreplace({b1: b2, b2: b1}) == Basic(b2, b1)
assert Atom(b1).xreplace({b1: b2}) == Atom(b1)
assert Atom(b1).xreplace({Atom(b1): b2}) == b2
raises(TypeError, lambda: b1.xreplace())
raises(TypeError, lambda: b1.xreplace([b1, b2]))
def test_Singleton():
global instantiated
instantiated = 0
class MySingleton(with_metaclass(Singleton, Basic)):
def __new__(cls):
global instantiated
instantiated += 1
return Basic.__new__(cls)
assert instantiated == 0
MySingleton() # force instantiation
assert instantiated == 1
assert MySingleton() is not Basic()
assert MySingleton() is MySingleton()
assert S.MySingleton is MySingleton()
assert instantiated == 1
class MySingleton_sub(MySingleton):
pass
assert instantiated == 1
MySingleton_sub()
assert instantiated == 2
assert MySingleton_sub() is not MySingleton()
assert MySingleton_sub() is MySingleton_sub()
def test_preorder_traversal():
expr = Basic(b21, b3)
assert list(
preorder_traversal(expr)) == [expr, b21, b2, b1, b1, b3, b2, b1]
assert list(preorder_traversal(('abc', ('d', 'ef')))) == [
('abc', ('d', 'ef')), 'abc', ('d', 'ef'), 'd', 'ef']
result = []
pt = preorder_traversal(expr)
for i in pt:
result.append(i)
if i == b2:
pt.skip()
assert result == [expr, b21, b2, b1, b3, b2]
w, x, y, z = symbols('w:z')
expr = z + w*(x + y)
assert list(preorder_traversal([expr], keys=default_sort_key)) == \
[[w*(x + y) + z], w*(x + y) + z, z, w*(x + y), w, x + y, x, y]
assert list(preorder_traversal((x + y)*z, keys=True)) == \
[z*(x + y), z, x + y, x, y]
def test_sorted_args():
x = symbols('x')
assert b21._sorted_args == b21.args
raises(AttributeError, lambda: x._sorted_args)
def test_call():
x, y = symbols('x y')
# See the long history of this in issues 5026 and 5105.
raises(TypeError, lambda: sin(x)({ x : 1, sin(x) : 2}))
raises(TypeError, lambda: sin(x)(1))
# No effect as there are no callables
assert sin(x).rcall(1) == sin(x)
assert (1 + sin(x)).rcall(1) == 1 + sin(x)
# Effect in the presence of callables
l = Lambda(x, 2*x)
assert (l + x).rcall(y) == 2*y + x
assert (x**l).rcall(2) == x**4
# TODO UndefinedFunction does not subclass Expr
#f = Function('f')
#assert (2*f)(x) == 2*f(x)
assert (Q.real & Q.positive).rcall(x) == Q.real(x) & Q.positive(x)
def test_literal_evalf_is_number_is_zero_is_comparable():
from sympy.integrals.integrals import Integral
from sympy.core.symbol import symbols
from sympy.core.function import Function
from sympy.functions.elementary.trigonometric import cos, sin
x = symbols('x')
f = Function('f')
# the following should not be changed without a lot of discussion
# `foo.is_number` should be equivalent to `not foo.free_symbols`
# it should not attempt anything fancy; see is_zero, is_constant
# and equals for more rigorous tests.
assert f(1).is_number is True
i = Integral(0, (x, x, x))
# expressions that are symbolically 0 can be difficult to prove
# so in case there is some easy way to know if something is 0
# it should appear in the is_zero property for that object;
# if is_zero is true evalf should always be able to compute that
# zero
assert i.n() == 0
assert i.is_zero
assert i.is_number is False
assert i.evalf(2, strict=False) == 0
# issue 10268
n = sin(1)**2 + cos(1)**2 - 1
assert n.is_comparable is False
assert n.n(2).is_comparable is False
assert n.n(2).n(2).is_comparable
| bsd-3-clause |
frewsxcv/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/support.py | 450 | 5496 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import codecs
import glob
import xml.sax.handler
base_path = os.path.split(__file__)[0]
test_dir = os.path.join(base_path, 'testdata')
sys.path.insert(0, os.path.abspath(os.path.join(base_path,
os.path.pardir,
os.path.pardir)))
from html5lib import treebuilders
del base_path
# Build a dict of available trees
treeTypes = {"DOM": treebuilders.getTreeBuilder("dom")}
# Try whatever etree implementations are available from a list that are
# "supposed" to work
try:
import xml.etree.ElementTree as ElementTree
treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
try:
import elementtree.ElementTree as ElementTree
treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
pass
try:
import xml.etree.cElementTree as cElementTree
treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
try:
import cElementTree
treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
pass
try:
import lxml.etree as lxml # flake8: noqa
except ImportError:
pass
else:
treeTypes['lxml'] = treebuilders.getTreeBuilder("lxml")
def get_data_files(subdirectory, files='*.dat'):
return glob.glob(os.path.join(test_dir, subdirectory, files))
class DefaultDict(dict):
def __init__(self, default, *args, **kwargs):
self.default = default
dict.__init__(self, *args, **kwargs)
def __getitem__(self, key):
return dict.get(self, key, self.default)
class TestData(object):
def __init__(self, filename, newTestHeading="data", encoding="utf8"):
if encoding is None:
self.f = open(filename, mode="rb")
else:
self.f = codecs.open(filename, encoding=encoding)
self.encoding = encoding
self.newTestHeading = newTestHeading
def __del__(self):
self.f.close()
def __iter__(self):
data = DefaultDict(None)
key = None
for line in self.f:
heading = self.isSectionHeading(line)
if heading:
if data and heading == self.newTestHeading:
# Remove trailing newline
data[key] = data[key][:-1]
yield self.normaliseOutput(data)
data = DefaultDict(None)
key = heading
data[key] = "" if self.encoding else b""
elif key is not None:
data[key] += line
if data:
yield self.normaliseOutput(data)
def isSectionHeading(self, line):
"""If the current heading is a test section heading return the heading,
otherwise return False"""
# print(line)
if line.startswith("#" if self.encoding else b"#"):
return line[1:].strip()
else:
return False
def normaliseOutput(self, data):
# Remove trailing newlines
for key, value in data.items():
if value.endswith("\n" if self.encoding else b"\n"):
data[key] = value[:-1]
return data
def convert(stripChars):
def convertData(data):
"""convert the output of str(document) to the format used in the testcases"""
data = data.split("\n")
rv = []
for line in data:
if line.startswith("|"):
rv.append(line[stripChars:])
else:
rv.append(line)
return "\n".join(rv)
return convertData
convertExpected = convert(2)
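# Illustrative sketch (not part of the original module; assumes a
# html5lib-style .dat file at the given path). TestData yields one dict per
# "#data" section, and convertExpected strips the leading "| " tree markers.
def _example_read_dat(path):
    for test in TestData(path, newTestHeading="data"):
        input_html = test["data"]
        expected_tree = convertExpected(test["document"])
        yield input_html, expected_tree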
def errorMessage(input, expected, actual):
msg = ("Input:\n%s\nExpected:\n%s\nRecieved\n%s\n" %
(repr(input), repr(expected), repr(actual)))
if sys.version_info.major == 2:
msg = msg.encode("ascii", "backslashreplace")
return msg
class TracingSaxHandler(xml.sax.handler.ContentHandler):
def __init__(self):
xml.sax.handler.ContentHandler.__init__(self)
self.visited = []
def startDocument(self):
self.visited.append('startDocument')
def endDocument(self):
self.visited.append('endDocument')
def startPrefixMapping(self, prefix, uri):
# These are ignored as their order is not guaranteed
pass
def endPrefixMapping(self, prefix):
# These are ignored as their order is not guaranteed
pass
def startElement(self, name, attrs):
self.visited.append(('startElement', name, attrs))
def endElement(self, name):
self.visited.append(('endElement', name))
def startElementNS(self, name, qname, attrs):
self.visited.append(('startElementNS', name, qname, dict(attrs)))
def endElementNS(self, name, qname):
self.visited.append(('endElementNS', name, qname))
def characters(self, content):
self.visited.append(('characters', content))
def ignorableWhitespace(self, whitespace):
self.visited.append(('ignorableWhitespace', whitespace))
def processingInstruction(self, target, data):
self.visited.append(('processingInstruction', target, data))
def skippedEntity(self, name):
self.visited.append(('skippedEntity', name))
| mpl-2.0 |
jmcarp/regulations-parser | regparser/layer/interpretations.py | 7 | 2506 | from collections import defaultdict
from regparser.citations import Label
from regparser.layer.layer import Layer
from regparser.tree import struct
from regparser.tree.interpretation import text_to_labels
class Interpretations(Layer):
"""Supplement I (interpretations) provides (sometimes very lengthy) extra
information about particular paragraphs. This layer provides those
interpretations."""
def __init__(self, *args, **kwargs):
Layer.__init__(self, *args, **kwargs)
self.lookup_table = defaultdict(list)
def pre_process(self):
"""Create a lookup table for each interpretation"""
def per_node(node):
if (node.node_type != struct.Node.INTERP
or node.label[-1] != struct.Node.INTERP_MARK):
return
# Always add a connection based on the interp's label
self.lookup_table[tuple(node.label[:-1])].append(node)
# Also add connections based on the title
for label in text_to_labels(node.title or '',
Label.from_node(node),
warn=False):
label = tuple(label[:-1]) # Remove Interp marker
if node not in self.lookup_table[label]:
self.lookup_table[label].append(node)
struct.walk(self.tree, per_node)
def process(self, node):
"""Is there an interpretation associated with this node? If yes,
return the associated layer information. @TODO: Right now, this only
associates if there is a direct match. It should also associate if any
parents match"""
label = tuple(node.label)
if self.lookup_table[label]: # default dict; will always be present
interp_labels = [n.label_id() for n in self.lookup_table[label]
if not self.empty_interpretation(n)]
return [{'reference': l} for l in interp_labels] or None
def empty_interpretation(self, interp):
"""We don't want to include empty (e.g. \n\n) nodes as
interpretations unless their children are subparagraphs. We
distinguish subparagraphs from structural children by checking the
location of the 'Interp' delimiter."""
if interp.text.strip():
return False
return all(not child.label
or child.label[-1] == struct.Node.INTERP_MARK
for child in interp.children)
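# Illustrative sketch (not part of the original module; the Layer constructor
# signature is an assumption inferred from the self.tree usage above). A
# driver would build the layer over a regulation tree, pre-process once, and
# then query individual nodes for attached interpretations.
def _example_lookup(tree, node):
    layer = Interpretations(tree)
    layer.pre_process()
    return layer.process(node)   # e.g. [{'reference': '1005-2-Interp'}] or None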
| cc0-1.0 |
Spiderlover/Toontown | toontown/suit/SuitInvasionManagerAI.py | 1 | 11138 | import time
from random import random, randint, choice
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from toontown.battle import SuitBattleGlobals
from toontown.toonbase.ToontownGlobals import IDES_OF_MARCH
import SuitDNA
from SuitInvasionGlobals import *
class SuitInvasionManagerAI:
notify = DirectNotifyGlobal.directNotify.newCategory('SuitInvasionManagerAI')
def __init__(self, air):
self.air = air
self.invading = False
self.start = 0
self.remaining = 0
self.total = 0
self.suitDeptIndex = None
self.suitTypeIndex = None
self.megaInvasion = None
self.megaInvasionCog = None
self.megaInvasionFlags = None
self.flags = 0
self.isSkelecog = 0
self.isV2 = 0
self.isWaiter = 0
self.isVirtual = 0
self.isRental = 0
self.flags = [0, 0, 0, 0, 0]
self.air.netMessenger.accept(
'startInvasion', self, self.handleStartInvasion)
self.air.netMessenger.accept(
'stopInvasion', self, self.handleStopInvasion)
# We want to handle shard status queries so that a ShardStatusReceiver
# being created after we're created will know where we're at:
self.air.netMessenger.accept('queryShardStatus', self, self.sendInvasionStatus)
self.safeHarbours = []
tempSafeHarbours = config.GetString('safe-harbours','')
if tempSafeHarbours != '':
for safeHarbour in tempSafeHarbours.split(","):
safeHarbour = safeHarbour.strip()
self.safeHarbours.append(safeHarbour)
# Define the probability unconditionally so __randomInvasionTick cannot
# raise an AttributeError when mega invasions are disabled.
self.randomInvasionProbability = config.GetFloat('mega-invasion-probability', 0.65)
if config.GetBool('want-mega-invasions', False):
if self.air.distributedDistrict.name in self.safeHarbours:
self.notify.debug("Can't summon mega invasion in safe harbour!")
elif self.air.holidayManager.isHolidayRunning(IDES_OF_MARCH):#Temp
self.megaInvasion = IDES_OF_MARCH
#if self.megaInvasion:
# self.megaInvasionCog = megaInvasionDict[self.megaInvasion][0]
taskMgr.doMethodLater(randint(1800, 5400), self.__randomInvasionTick, 'random-invasion-tick')
self.sendInvasionStatus()
def getInvading(self):
return self.invading
def getInvadingCog(self):
return (self.suitDeptIndex, self.suitTypeIndex, self.flags)
def startInvasion(self, suitDeptIndex=None, suitTypeIndex=None, flags=[0, 0, 0, 0, 0],
type=INVASION_TYPE_NORMAL):
if self.invading:
# An invasion is currently in progress; ignore this request.
return False
if (suitDeptIndex is None) and (suitTypeIndex is None) and (not flags):
# This invasion is no-op.
return False
if((flags[2] == 1) and (flags[0] == 1 or flags[4] == 1)):
return False
if((flags[0] == 1) and (flags[1] == 1 or flags[2] == 1 or flags[4] == 1)):
return False
if (suitDeptIndex is None) and (suitTypeIndex is not None):
# It's impossible to determine the invading Cog.
return False
if (suitDeptIndex is not None) and (suitDeptIndex >= len(SuitDNA.suitDepts)):
# Invalid suit department.
return False
if (suitTypeIndex is not None) and (suitTypeIndex >= SuitDNA.suitsPerDept):
# Invalid suit type.
return False
if type not in (INVASION_TYPE_NORMAL, INVASION_TYPE_MEGA):
# Invalid invasion type.
return False
# Looks like we're all good. Begin the invasion:
self.invading = True
self.start = int(time.time())
self.suitDeptIndex = suitDeptIndex
self.suitTypeIndex = suitTypeIndex
self.flags = flags
self.isSkelecog = flags[0]
self.isV2 = flags[1]
self.isWaiter = flags[2]
self.isVirtual = flags[3]
self.isRental = flags[4]
# How many suits do we want?
if type == INVASION_TYPE_NORMAL:
self.total = 1000
elif type == INVASION_TYPE_MEGA:
self.total = randint(1800, 5400)
self.remaining = self.total
self.flySuits()
self.notifyInvasionStarted()
# Update the invasion tracker on the districts page in the Shticker Book:
if self.suitDeptIndex is not None:
self.air.districtStats.b_setInvasionStatus(self.suitDeptIndex + 1)
else:
self.air.districtStats.b_setInvasionStatus(5)
# If this is a normal invasion, and the players take too long to defeat
# all of the Cogs, we'll want the invasion to timeout:
if type == INVASION_TYPE_NORMAL:
timeout = config.GetInt('invasion-timeout', 1800)
taskMgr.doMethodLater(timeout, self.stopInvasion, 'invasionTimeout')
self.sendInvasionStatus()
return True
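# Illustrative note (not part of the original class): the flags list above is
# [isSkelecog, isV2, isWaiter, isVirtual, isRental], and department index 3 is
# Sellbots in SuitDNA.suitDepts. A whole-department Sellbot skelecog invasion
# would therefore be requested roughly as:
#   self.startInvasion(suitDeptIndex=3, suitTypeIndex=None,
#                      flags=[1, 0, 0, 0, 0], type=INVASION_TYPE_NORMAL)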
def stopInvasion(self, task=None):
if not self.invading:
# We are not currently invading.
return False
# Stop the invasion timeout task:
taskMgr.remove('invasionTimeout')
# Update the invasion tracker on the districts page in the Shticker Book:
self.air.districtStats.b_setInvasionStatus(0)
# Revert what was done when the invasion started:
self.notifyInvasionEnded()
self.invading = False
self.start = 0
self.suitDeptIndex = None
self.suitTypeIndex = None
self.flags = None
self.total = 0
self.remaining = 0
self.flySuits()
self.sendInvasionStatus()
return True
def getSuitName(self):
if self.suitDeptIndex is not None:
if self.suitTypeIndex is not None:
return SuitDNA.getSuitName(self.suitDeptIndex, self.suitTypeIndex)
else:
return SuitDNA.suitDepts[self.suitDeptIndex]
else:
return SuitDNA.suitHeadTypes[0]
def notifyInvasionStarted(self):
msgType = SuitInvasionBegin
if self.isSkelecog:
msgType = SkelecogInvasionBegin
elif self.isV2:
msgType = V2InvasionBegin
elif self.isWaiter:
msgType = WaiterInvasionBegin
elif self.isVirtual:
msgType = VirtualInvasionBegin
elif self.isRental:
msgType = RentalInvasionBegin
self.air.newsManager.sendUpdate(
'setInvasionStatus',
[msgType, self.getSuitName(), self.total, self.flags])
def notifyInvasionEnded(self):
msgType = SuitInvasionEnd
if self.isSkelecog:
msgType = SkelecogInvasionEnd
elif self.isV2:
msgType = V2InvasionEnd
elif self.isWaiter:
msgType = WaiterInvasionEnd
elif self.isVirtual:
msgType = VirtualInvasionEnd
elif self.isRental:
msgType = RentalInvasionEnd
self.air.newsManager.sendUpdate(
'setInvasionStatus', [msgType, self.getSuitName(), 0, self.flags])
def notifyInvasionUpdate(self):
self.air.newsManager.sendUpdate(
'setInvasionStatus',
[SuitInvasionUpdate, self.getSuitName(),
self.remaining, self.flags])
def notifyInvasionBulletin(self, avId):
msgType = SuitInvasionBulletin
if self.isSkelecog:
msgType = SkelecogInvasionBulletin
elif self.isV2:
msgType = V2InvasionBulletin
elif self.isWaiter:
msgType = WaiterInvasionBulletin
elif self.isVirtual:
msgType = VirtualInvasionBulletin
elif self.isRental:
msgType = RentalInvasionBulletin
self.air.newsManager.sendUpdateToAvatarId(
avId, 'setInvasionStatus',
[msgType, self.getSuitName(), self.remaining, self.flags])
def flySuits(self):
for suitPlanner in self.air.suitPlanners.values():
suitPlanner.flySuits()
def handleSuitDefeated(self):
self.remaining -= 1
if self.remaining == 0:
self.stopInvasion()
elif self.remaining == (self.total/2):
self.notifyInvasionUpdate()
self.sendInvasionStatus()
def handleStartInvasion(self, shardId, *args):
if shardId == self.air.ourChannel:
self.startInvasion(*args)
def handleStopInvasion(self, shardId):
if shardId == self.air.ourChannel:
self.stopInvasion()
def sendInvasionStatus(self):
if self.invading:
if self.suitDeptIndex is not None:
if self.suitTypeIndex is not None:
type = SuitBattleGlobals.SuitAttributes[self.getSuitName()]['name']
else:
type = SuitDNA.getDeptFullname(self.getSuitName())
else:
type = None
status = {
'invasion': {
'type': type,
'flags': [self.isSkelecog, self.isV2, self.isWaiter, self.isVirtual, self.isRental],
'remaining': self.remaining,
'total': self.total,
'start': self.start
}
}
else:
status = {'invasion': None}
self.air.netMessenger.send('shardStatus', [self.air.ourChannel, status])
def __randomInvasionTick(self, task=None):
"""
Each hour, have a tick to check if we want to start an invasion in
the current district. This works by having a random invasion
probability, and each tick it will generate a random float between
0 and 1, and then if it's less than or equal to the probablity, it
will spawn the invasion.
An invasion will not be started if there is an invasion already
on-going.
"""
# Generate a new tick delay.
task.delayTime = randint(1800, 5400)
if self.getInvading():
# We're already running an invasion. Don't start a new one.
self.notify.debug('Invasion tested but already running invasion!')
return task.again
if random() <= self.randomInvasionProbability:
# We want an invasion!
self.notify.debug('Invasion probability hit! Starting invasion.')
if config.GetBool('want-mega-invasions', False):
suitDept = megaInvasionDict[self.megaInvasion][0][0]
suitIndex = megaInvasionDict[self.megaInvasion][0][1]
if megaInvasionDict[self.megaInvasion][2]:
rngFlag = randint(0, 4)
flags = [0, 0, 0, 0, 0]
flags[rngFlag] = 1
else:
flags = megaInvasionDict[self.megaInvasion][1]
self.startInvasion(suitDept, suitIndex, flags, INVASION_TYPE_MEGA)
return task.again
| mit |
qmarlats/pyquizz | env-3/lib/python3.5/site-packages/pygments/lexers/iolang.py | 47 | 1904 | # -*- coding: utf-8 -*-
"""
pygments.lexers.iolang
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Io language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number
__all__ = ['IoLexer']
class IoLexer(RegexLexer):
"""
For `Io <http://iolanguage.com/>`_ (a small, prototype-based
programming language) source.
.. versionadded:: 0.10
"""
name = 'Io'
filenames = ['*.io']
aliases = ['io']
mimetypes = ['text/x-iosrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r'//(.*?)\n', Comment.Single),
(r'#(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nestedcomment'),
# DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# Operators
(r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}',
Operator),
# keywords
(r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b',
Keyword),
# constants
(r'(nil|false|true)\b', Name.Constant),
# names
(r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b',
Name.Builtin),
(r'[a-zA-Z_]\w*', Name),
# numbers
(r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
],
'nestedcomment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
]
}
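# Illustrative sketch (not part of the original lexer): running a snippet of
# Io source through IoLexer with the standard Pygments highlight() pipeline.
def _example_highlight():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    code = 'greet := method(name, ("hello " .. name) println)'
    return highlight(code, IoLexer(), TerminalFormatter())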
| gpl-3.0 |
morreene/tradenews | venv/Lib/site-packages/sqlalchemy/ext/baked.py | 32 | 16967 | # sqlalchemy/ext/baked.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Baked query extension.
Provides a creational pattern for the :class:`.query.Query` object which
allows the fully constructed object, Core select statement, and string
compiled result to be fully cached.
"""
from ..orm.query import Query
from ..orm import strategies, attributes, properties, \
strategy_options, util as orm_util, interfaces
from .. import log as sqla_log
from ..sql import util as sql_util
from ..orm import exc as orm_exc
from .. import exc as sa_exc
from .. import util
import copy
import logging
log = logging.getLogger(__name__)
class BakedQuery(object):
"""A builder object for :class:`.query.Query` objects."""
__slots__ = 'steps', '_bakery', '_cache_key', '_spoiled'
def __init__(self, bakery, initial_fn, args=()):
self._cache_key = ()
self._update_cache_key(initial_fn, args)
self.steps = [initial_fn]
self._spoiled = False
self._bakery = bakery
@classmethod
def bakery(cls, size=200):
"""Construct a new bakery."""
_bakery = util.LRUCache(size)
def call(initial_fn, *args):
return cls(_bakery, initial_fn, args)
return call
def _clone(self):
b1 = BakedQuery.__new__(BakedQuery)
b1._cache_key = self._cache_key
b1.steps = list(self.steps)
b1._bakery = self._bakery
b1._spoiled = self._spoiled
return b1
def _update_cache_key(self, fn, args=()):
self._cache_key += (fn.__code__,) + args
def __iadd__(self, other):
if isinstance(other, tuple):
self.add_criteria(*other)
else:
self.add_criteria(other)
return self
def __add__(self, other):
if isinstance(other, tuple):
return self.with_criteria(*other)
else:
return self.with_criteria(other)
def add_criteria(self, fn, *args):
"""Add a criteria function to this :class:`.BakedQuery`.
This is equivalent to using the ``+=`` operator to
modify a :class:`.BakedQuery` in-place.
"""
self._update_cache_key(fn, args)
self.steps.append(fn)
return self
def with_criteria(self, fn, *args):
"""Add a criteria function to a :class:`.BakedQuery` cloned from this one.
This is equivalent to using the ``+`` operator to
produce a new :class:`.BakedQuery` with modifications.
"""
return self._clone().add_criteria(fn, *args)
def for_session(self, session):
"""Return a :class:`.Result` object for this :class:`.BakedQuery`.
This is equivalent to calling the :class:`.BakedQuery` as a
Python callable, e.g. ``result = my_baked_query(session)``.
"""
return Result(self, session)
def __call__(self, session):
return self.for_session(session)
def spoil(self, full=False):
"""Cancel any query caching that will occur on this BakedQuery object.
The BakedQuery can continue to be used normally, however additional
creational functions will not be cached; they will be called
on every invocation.
This is to support the case where a particular step in constructing
a baked query disqualifies the query from being cacheable, such
as a variant that relies upon some uncacheable value.
:param full: if False, only functions added to this
:class:`.BakedQuery` object subsequent to the spoil step will be
non-cached; the state of the :class:`.BakedQuery` up until
this point will be pulled from the cache. If True, then the
entire :class:`.Query` object is built from scratch each
time, with all creational functions being called on each
invocation.
"""
if not full:
_spoil_point = self._clone()
_spoil_point._cache_key += ('_query_only', )
self.steps = [_spoil_point._retrieve_baked_query]
self._spoiled = True
return self
def _retrieve_baked_query(self, session):
query = self._bakery.get(self._cache_key, None)
if query is None:
query = self._as_query(session)
self._bakery[self._cache_key] = query.with_session(None)
return query.with_session(session)
def _bake(self, session):
query = self._as_query(session)
context = query._compile_context()
self._bake_subquery_loaders(session, context)
context.session = None
context.query = query = context.query.with_session(None)
query._execution_options = query._execution_options.union(
{"compiled_cache": self._bakery}
)
# we'll be holding onto the query for some of its state,
# so delete some compilation-use-only attributes that can take up
# space
for attr in (
'_correlate', '_from_obj', '_mapper_adapter_map',
'_joinpath', '_joinpoint'):
query.__dict__.pop(attr, None)
self._bakery[self._cache_key] = context
return context
def _as_query(self, session):
query = self.steps[0](session)
for step in self.steps[1:]:
query = step(query)
return query
def _bake_subquery_loaders(self, session, context):
"""convert subquery eager loaders in the cache into baked queries.
For subquery eager loading to work, all we need here is that the
Query point to the correct session when it is run. However, since
we are "baking" anyway, we may as well also turn the query into
a "baked" query so that we save on performance too.
"""
context.attributes['baked_queries'] = baked_queries = []
for k, v in list(context.attributes.items()):
if isinstance(v, Query):
if 'subquery' in k:
bk = BakedQuery(self._bakery, lambda *args: v)
bk._cache_key = self._cache_key + k
bk._bake(session)
baked_queries.append((k, bk._cache_key, v))
del context.attributes[k]
def _unbake_subquery_loaders(self, session, context, params):
"""Retrieve subquery eager loaders stored by _bake_subquery_loaders
and turn them back into Result objects that will iterate just
like a Query object.
"""
for k, cache_key, query in context.attributes["baked_queries"]:
# bind the loop variable as a default argument to avoid late binding
bk = BakedQuery(self._bakery, lambda sess, query=query: query.with_session(sess))
bk._cache_key = cache_key
context.attributes[k] = bk.for_session(session).params(**params)
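# Illustrative sketch (not part of this module): the canonical baked-query
# usage pattern the classes above implement. ``User`` is assumed to be a
# mapped class and ``session`` a Session; in real code the bakery is created
# once at module scope so its cache is shared across calls.
def _example_bakery_usage(session, User, name):
    from sqlalchemy import bindparam
    bakery = BakedQuery.bakery()
    baked_query = bakery(lambda s: s.query(User))
    baked_query += lambda q: q.filter(User.name == bindparam('name'))
    return baked_query(session).params(name=name).all()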
class Result(object):
"""Invokes a :class:`.BakedQuery` against a :class:`.Session`.
The :class:`.Result` object is where the actual :class:`.query.Query`
object gets created, or retrieved from the cache,
against a target :class:`.Session`, and is then invoked for results.
"""
__slots__ = 'bq', 'session', '_params'
def __init__(self, bq, session):
self.bq = bq
self.session = session
self._params = {}
def params(self, *args, **kw):
"""Specify parameters to be replaced into the string SQL statement."""
if len(args) == 1:
kw.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params.update(kw)
return self
def _as_query(self):
return self.bq._as_query(self.session).params(self._params)
def __str__(self):
return str(self._as_query())
def __iter__(self):
bq = self.bq
if bq._spoiled:
return iter(self._as_query())
baked_context = bq._bakery.get(bq._cache_key, None)
if baked_context is None:
baked_context = bq._bake(self.session)
context = copy.copy(baked_context)
context.session = self.session
context.attributes = context.attributes.copy()
bq._unbake_subquery_loaders(self.session, context, self._params)
context.statement.use_labels = True
if context.autoflush and not context.populate_existing:
self.session._autoflush()
return context.query.params(self._params).\
with_session(self.session)._execute_and_instances(context)
def first(self):
"""Return the first row.
Equivalent to :meth:`.Query.first`.
"""
bq = self.bq.with_criteria(lambda q: q.slice(0, 1))
ret = list(bq.for_session(self.session).params(self._params))
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Equivalent to :meth:`.Query.one`.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def one_or_none(self):
"""Return one or zero results, or raise an exception for multiple
rows.
Equivalent to :meth:`.Query.one_or_none`.
.. versionadded:: 1.0.9
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
return None
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one_or_none()")
def all(self):
"""Return all rows.
Equivalent to :meth:`.Query.all`.
"""
return list(self)
def get(self, ident):
"""Retrieve an object based on identity.
Equivalent to :meth:`.Query.get`.
"""
query = self.bq.steps[0](self.session)
return query._get_impl(ident, self._load_on_ident)
def _load_on_ident(self, query, key):
"""Load the given identity key from the database."""
ident = key[1]
mapper = query._mapper_zero()
_get_clause, _get_params = mapper._get_clause
def setup(query):
_lcl_get_clause = _get_clause
q = query._clone()
q._get_condition()
q._order_by = None
# None present in ident - turn those comparisons
# into "IS NULL"
if None in ident:
nones = set([
_get_params[col].key for col, value in
zip(mapper.primary_key, ident) if value is None
])
_lcl_get_clause = sql_util.adapt_criterion_to_null(
_lcl_get_clause, nones)
_lcl_get_clause = q._adapt_clause(_lcl_get_clause, True, False)
q._criterion = _lcl_get_clause
return q
# cache the query against a key that includes
# which positions in the primary key are NULL
# (remember, we can map to an OUTER JOIN)
bq = self.bq
# add the clause we got from mapper._get_clause to the cache
# key so that if a race causes multiple calls to _get_clause,
# we've cached on ours
bq = bq._clone()
bq._cache_key += (_get_clause, )
bq = bq.with_criteria(setup, tuple(elem is None for elem in ident))
params = dict([
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(ident, mapper.primary_key)
])
result = list(bq.for_session(self.session).params(**params))
l = len(result)
if l > 1:
raise orm_exc.MultipleResultsFound()
elif l:
return result[0]
else:
return None
def bake_lazy_loaders():
"""Enable the use of baked queries for all lazyloaders systemwide.
This operation should be safe for all lazy loaders, and will reduce
Python overhead for these operations.
"""
BakedLazyLoader._strategy_keys[:] = []
properties.RelationshipProperty.strategy_for(
lazy="select")(BakedLazyLoader)
properties.RelationshipProperty.strategy_for(
lazy=True)(BakedLazyLoader)
properties.RelationshipProperty.strategy_for(
lazy="baked_select")(BakedLazyLoader)
strategies.LazyLoader._strategy_keys[:] = BakedLazyLoader._strategy_keys[:]
def unbake_lazy_loaders():
"""Disable the use of baked queries for all lazyloaders systemwide.
This operation reverts the changes produced by :func:`.bake_lazy_loaders`.
"""
strategies.LazyLoader._strategy_keys[:] = []
BakedLazyLoader._strategy_keys[:] = []
properties.RelationshipProperty.strategy_for(
lazy="select")(strategies.LazyLoader)
properties.RelationshipProperty.strategy_for(
lazy=True)(strategies.LazyLoader)
properties.RelationshipProperty.strategy_for(
lazy="baked_select")(BakedLazyLoader)
assert strategies.LazyLoader._strategy_keys
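# Illustrative sketch (the import path is an assumption): toggling baked lazy
# loading system-wide with the two helpers above.
#
#     from sqlalchemy.ext import baked
#     baked.bake_lazy_loaders()      # lazy="select" relationships now bake
#     ...                            # run the application
#     baked.unbake_lazy_loaders()    # restore the plain LazyLoader strategy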
@sqla_log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="baked_select")
class BakedLazyLoader(strategies.LazyLoader):
def _emit_lazyload(self, session, state, ident_key, passive):
q = BakedQuery(
self.mapper._compiled_cache,
lambda session: session.query(self.mapper))
q.add_criteria(
lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False),
self.parent_property)
if not self.parent_property.bake_queries:
q.spoil(full=True)
if self.parent_property.secondary is not None:
q.add_criteria(
lambda q:
q.select_from(self.mapper, self.parent_property.secondary))
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
q.add_criteria(lambda q: q.autoflush(False))
if state.load_path:
q.spoil()
q.add_criteria(
lambda q:
q._with_current_path(state.load_path[self.parent_property]))
if state.load_options:
q.spoil()
q.add_criteria(
lambda q: q._conditional_options(*state.load_options))
if self.use_get:
return q(session)._load_on_ident(
session.query(self.mapper), ident_key)
if self.parent_property.order_by:
q.add_criteria(
lambda q:
q.order_by(*util.to_list(self.parent_property.order_by)))
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if rev.direction is interfaces.MANYTOONE and \
rev._use_get and \
not isinstance(rev.strategy, strategies.LazyLoader):
q.add_criteria(
lambda q:
q.options(
strategy_options.Load(
rev.parent).baked_lazyload(rev.key)))
lazy_clause, params = self._generate_lazy_clause(state, passive)
if pending:
if orm_util._none_set.intersection(params.values()):
return None
q.add_criteria(lambda q: q.filter(lazy_clause))
result = q(session).params(**params).all()
if self.uselist:
return result
else:
l = len(result)
if l:
if l > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property)
return result[0]
else:
return None
@strategy_options.loader_option()
def baked_lazyload(loadopt, attr):
"""Indicate that the given attribute should be loaded using "lazy"
loading with a "baked" query used in the load.
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"})
@baked_lazyload._add_unbound_fn
def baked_lazyload(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, False, {})
@baked_lazyload._add_unbound_all_fn
def baked_lazyload_all(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, True, {})
baked_lazyload = baked_lazyload._unbound_fn
baked_lazyload_all = baked_lazyload_all._unbound_all_fn
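# Hedged usage example (``User`` and its ``addresses`` relationship are
# assumptions): applying the baked lazy-load strategy to one relationship on
# a single query via the option defined above.
#
#     session.query(User).options(baked_lazyload(User.addresses)).all()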
bakery = BakedQuery.bakery
| bsd-3-clause |
sw-irou/flasktest | lib/werkzeug/serving.py | 309 | 27668 | # -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
However there are some caveats. Sourcecode won't reload itself when
    changed and each time you kill the server using ``^C`` you get a
`KeyboardInterrupt` error. While the latter is easy to solve the first
one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it a `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import socket
import sys
import time
import signal
import subprocess
try:
import thread
except ImportError:
import _thread as thread
try:
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from socketserver import ThreadingMixIn, ForkingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
import werkzeug
from werkzeug._internal import _log
from werkzeug._compat import iteritems, PY2, reraise, text_type, \
wsgi_encoding_dance
from werkzeug.urls import url_parse, url_unquote
from werkzeug.exceptions import InternalServerError, BadRequest
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return 'Werkzeug/' + werkzeug.__version__
def make_environ(self):
request_url = url_parse(self.path)
def shutdown_server():
self.server.shutdown_signal = True
url_scheme = self.server.ssl_context is None and 'http' or 'https'
path_info = url_unquote(request_url.path)
environ = {
'wsgi.version': (1, 0),
'wsgi.url_scheme': url_scheme,
'wsgi.input': self.rfile,
'wsgi.errors': sys.stderr,
'wsgi.multithread': self.server.multithread,
'wsgi.multiprocess': self.server.multiprocess,
'wsgi.run_once': False,
'werkzeug.server.shutdown':
shutdown_server,
'SERVER_SOFTWARE': self.server_version,
'REQUEST_METHOD': self.command,
'SCRIPT_NAME': '',
'PATH_INFO': wsgi_encoding_dance(path_info),
'QUERY_STRING': wsgi_encoding_dance(request_url.query),
'CONTENT_TYPE': self.headers.get('Content-Type', ''),
'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
'REMOTE_ADDR': self.client_address[0],
'REMOTE_PORT': self.client_address[1],
'SERVER_NAME': self.server.server_address[0],
'SERVER_PORT': str(self.server.server_address[1]),
'SERVER_PROTOCOL': self.request_version
}
for key, value in self.headers.items():
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
if request_url.netloc:
environ['HTTP_HOST'] = request_url.netloc
return environ
def run_wsgi(self):
if self.headers.get('Expect', '').lower().strip() == '100-continue':
self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')
environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, 'write() before start_response'
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
try:
code, msg = status.split(None, 1)
except ValueError:
code, msg = status, ""
self.send_response(int(code), msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if 'content-length' not in header_keys:
self.close_connection = True
self.send_header('Connection', 'close')
if 'server' not in header_keys:
self.send_header('Server', self.version_string())
if 'date' not in header_keys:
self.send_header('Date', self.date_time_string())
self.end_headers()
assert type(data) is bytes, 'applications must write bytes'
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
reraise(*exc_info)
finally:
exc_info = None
elif headers_set:
raise AssertionError('Headers already set')
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
if not headers_sent:
write(b'')
finally:
if hasattr(application_iter, 'close'):
application_iter.close()
application_iter = None
try:
execute(self.server.app)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from werkzeug.debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log('error', 'Error on request:\n%s',
traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
rv = None
try:
rv = BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e)
except Exception:
if self.server.ssl_context is None or not is_ssl_error():
raise
if self.server.shutdown_signal:
self.initiate_shutdown()
return rv
def initiate_shutdown(self):
"""A horrible, horrible way to kill the server for Python 2.6 and
later. It's the best we can do.
"""
# Windows does not provide SIGKILL, go with SIGTERM then.
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
# reloader active
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
os.kill(os.getpid(), sig)
# python 2.7
self.server._BaseServer__shutdown_request = True
# python 2.6
self.server._BaseServer__serving = False
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
message = code in self.responses and self.responses[code][0] or ''
if self.request_version != 'HTTP/0.9':
hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
self.wfile.write(hdr.encode('ascii'))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
return self.client_address[0]
def log_request(self, code='-', size='-'):
self.log('info', '"%s" %s %s', self.requestline, code, size)
def log_error(self, *args):
self.log('error', *args)
def log_message(self, format, *args):
self.log('info', format, *args)
def log(self, type, message, *args):
_log(type, '%s - - [%s] %s\n' % (self.address_string(),
self.log_date_time_string(),
message % args))
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_pair(cn=None):
from random import random
from OpenSSL import crypto
# pretty damn sure that this is not actually accepted by anyone
if cn is None:
cn = '*'
cert = crypto.X509()
    # sys.maxint is gone in Python 3; fall back to sys.maxsize there
    cert.set_serial_number(int(random() * getattr(sys, 'maxint', sys.maxsize)))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
subject = cert.get_subject()
subject.CN = cn
subject.O = 'Dummy Certificate'
issuer = cert.get_issuer()
issuer.CN = 'Untrusted Authority'
issuer.O = 'Self-Signed'
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 768)
cert.set_pubkey(pkey)
cert.sign(pkey, 'md5')
return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
"""Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use.
"""
from OpenSSL import crypto
if host is not None:
cn = '*.%s/CN=%s' % (host, host)
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
cert_file = base_path + '.crt'
pkey_file = base_path + '.key'
with open(cert_file, 'w') as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(pkey_file, 'w') as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
return cert_file, pkey_file
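# Hedged example (the base path is an assumption): generate the dev cert once,
# then reuse the returned (cert_file, pkey_file) tuple as ``ssl_context``.
#
#     cert_file, pkey_file = make_ssl_devcert('/tmp/devcert', host='localhost')
#     run_simple('localhost', 4443, app, ssl_context=(cert_file, pkey_file))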
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
from OpenSSL import SSL
cert, pkey = generate_adhoc_ssl_pair()
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_privatekey(pkey)
ctx.use_certificate(cert)
return ctx
def load_ssl_context(cert_file, pkey_file):
"""Loads an SSL context from a certificate and private key file."""
from OpenSSL import SSL
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_certificate_file(cert_file)
ctx.use_privatekey_file(pkey_file)
return ctx
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
if error is None:
error = sys.exc_info()[1]
from OpenSSL import SSL
return isinstance(error, SSL.Error)
class _SSLConnectionFix(object):
"""Wrapper around SSL connection to provide a working makefile()."""
def __init__(self, con):
self._con = con
def makefile(self, mode, bufsize):
return socket._fileobject(self._con, mode, bufsize)
def __getattr__(self, attrib):
return getattr(self._con, attrib)
def shutdown(self, arg=None):
try:
self._con.shutdown()
except Exception:
pass
def select_ip_version(host, port):
"""Returns AF_INET4 or AF_INET6 depending on where to connect to."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
##try:
## info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
## socket.SOCK_STREAM, 0,
## socket.AI_PASSIVE)
## if info:
## return info[0][0]
##except socket.gaierror:
## pass
if ':' in host and hasattr(socket, 'AF_INET6'):
return socket.AF_INET6
return socket.AF_INET
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
request_queue_size = 128
def __init__(self, host, port, app, handler=None,
passthrough_errors=False, ssl_context=None):
if handler is None:
handler = WSGIRequestHandler
self.address_family = select_ip_version(host, port)
HTTPServer.__init__(self, (host, int(port)), handler)
self.app = app
self.passthrough_errors = passthrough_errors
self.shutdown_signal = False
if ssl_context is not None:
try:
from OpenSSL import tsafe
except ImportError:
raise TypeError('SSL is not available if the OpenSSL '
'library is not installed.')
if isinstance(ssl_context, tuple):
ssl_context = load_ssl_context(*ssl_context)
if ssl_context == 'adhoc':
ssl_context = generate_adhoc_ssl_context()
self.socket = tsafe.Connection(ssl_context, self.socket)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
self.shutdown_signal = False
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
else:
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
if self.ssl_context is not None:
con = _SSLConnectionFix(con)
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(self, host, port, app, processes=40, handler=None,
passthrough_errors=False, ssl_context=None):
BaseWSGIServer.__init__(self, host, port, app, handler,
passthrough_errors, ssl_context)
self.max_children = processes
def make_server(host, port, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
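# Illustrative sketch (``app`` is an assumption): building a threaded server
# with make_server() and running it until interrupted.
#
#     server = make_server('127.0.0.1', 5000, app, threaded=True)
#     server.serve_forever()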
def _iter_module_files():
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
def _reloader_stat_loop(extra_files=None, interval=1):
"""When this function is run from the main thread, it will force other
threads to exit when any modules currently loaded change.
Copyright notice. This function is based on the autoreload.py from
the CherryPy trac which originated from WSGIKit which is now dead.
:param extra_files: a list of additional files it should watch.
"""
from itertools import chain
mtimes = {}
while 1:
for filename in chain(_iter_module_files(), extra_files or ()):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
_log('info', ' * Detected change in %r, reloading' % filename)
sys.exit(3)
time.sleep(interval)
def _reloader_inotify(extra_files=None, interval=None):
# Mutated by inotify loop when changes occur.
changed = [False]
# Setup inotify watches
from pyinotify import WatchManager, Notifier
# this API changed at one point, support both
try:
from pyinotify import EventsCodes as ec
ec.IN_ATTRIB
except (ImportError, AttributeError):
import pyinotify as ec
wm = WatchManager()
mask = ec.IN_DELETE_SELF | ec.IN_MOVE_SELF | ec.IN_MODIFY | ec.IN_ATTRIB
def signal_changed(event):
if changed[0]:
return
_log('info', ' * Detected change in %r, reloading' % event.path)
changed[:] = [True]
for fname in extra_files or ():
wm.add_watch(fname, mask, signal_changed)
# ... And now we wait...
notif = Notifier(wm)
try:
while not changed[0]:
# always reiterate through sys.modules, adding them
for fname in _iter_module_files():
wm.add_watch(fname, mask, signal_changed)
notif.process_events()
if notif.check_events(timeout=interval):
notif.read_events()
# TODO Set timeout to something small and check parent liveliness
finally:
notif.stop()
sys.exit(3)
# currently we always use the stat loop reloader for the simple reason
# that the inotify one does not respond to added files properly. Also
# it's quite buggy and the API is a mess.
reloader_loop = _reloader_stat_loop
def restart_with_reloader():
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
_log('info', ' * Restarting with reloader')
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
# a weird bug on windows. sometimes unicode strings end up in the
# environment and subprocess.call does not like this, encode them
# to latin1 and continue.
if os.name == 'nt' and PY2:
for key, value in iteritems(new_environ):
if isinstance(value, text_type):
new_environ[key] = value.encode('iso-8859-1')
exit_code = subprocess.call(args, env=new_environ)
if exit_code != 3:
return exit_code
def run_with_reloader(main_func, extra_files=None, interval=1):
"""Run the given function in an independent python interpreter."""
import signal
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
thread.start_new_thread(main_func, ())
try:
reloader_loop(extra_files, interval)
except KeyboardInterrupt:
return
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
def run_simple(hostname, port, application, use_reloader=False,
use_debugger=False, use_evalex=True,
extra_files=None, reloader_interval=1, threaded=False,
processes=1, request_handler=None, static_files=None,
passthrough_errors=False, ssl_context=None):
"""Start an application using wsgiref and with an optional reloader. This
wraps `wsgiref` to fix the wrong default reporting of the multithreaded
WSGI variable and adds optional multithreading and fork support.
This function has a command-line interface too::
python -m werkzeug.serving --help
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
.. versionadded:: 0.8
Added support for automatically loading a SSL context from certificate
file and private key.
.. versionadded:: 0.9
Added command-line interface.
:param hostname: The host for the application. eg: ``'localhost'``
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of concurrent processes.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a dict of paths for static files. This works exactly
like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
:param ssl_context: an SSL context for the connection. Either an OpenSSL
context, a tuple in the form ``(cert_file, pkey_file)``,
the string ``'adhoc'`` if the server should
automatically create one, or `None` to disable SSL
(which is the default).
"""
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from werkzeug.wsgi import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def inner():
make_server(hostname, port, application, threaded,
processes, request_handler,
passthrough_errors, ssl_context).serve_forever()
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
display_hostname = hostname != '*' and hostname or 'localhost'
if ':' in display_hostname:
display_hostname = '[%s]' % display_hostname
_log('info', ' * Running on %s://%s:%d/', ssl_context is None
and 'http' or 'https', display_hostname, port)
if use_reloader:
# Create and destroy a socket so that any exceptions are raised before
# we spawn a separate Python interpreter and lose this ability.
address_family = select_ip_version(hostname, port)
test_socket = socket.socket(address_family, socket.SOCK_STREAM)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
test_socket.bind((hostname, port))
test_socket.close()
run_with_reloader(inner, extra_files, reloader_interval)
else:
inner()
def main():
'''A simple command-line interface for :py:func:`run_simple`.'''
# in contrast to argparse, this works at least under Python < 2.7
import optparse
from werkzeug.utils import import_string
parser = optparse.OptionParser(usage='Usage: %prog [options] app_module:app_object')
parser.add_option('-b', '--bind', dest='address',
help='The hostname:port the app should listen on.')
parser.add_option('-d', '--debug', dest='use_debugger',
action='store_true', default=False,
help='Use Werkzeug\'s debugger.')
parser.add_option('-r', '--reload', dest='use_reloader',
action='store_true', default=False,
help='Reload Python process if modules change.')
options, args = parser.parse_args()
hostname, port = None, None
if options.address:
address = options.address.split(':')
hostname = address[0]
if len(address) > 1:
port = address[1]
if len(args) != 1:
        sys.stdout.write('No application supplied, or too many arguments. See --help\n')
sys.exit(1)
app = import_string(args[0])
run_simple(
hostname=(hostname or '127.0.0.1'), port=int(port or 5000),
application=app, use_reloader=options.use_reloader,
use_debugger=options.use_debugger
)
if __name__ == '__main__':
main()
| bsd-3-clause |
bakhtout/odoo-educ | addons/website_mail/models/mail_thread.py | 338 | 1454 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
# TODO for trunk, remove me
class MailThread(osv.AbstractModel):
_inherit = 'mail.thread'
_columns = {
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', ('model', '=', self._name), ('type', '=', 'comment')
],
string='Website Messages',
help="Website communication history",
),
}
| agpl-3.0 |
super13/tensorflow-speech-recognition-pai | src/smodels/RNN/utils.py | 1 | 1207 | import os
import tensorflow as tf
from configparser import ConfigParser
from utilities.set_dirs import get_conf_dir
conf_dir = get_conf_dir(debug=False)
parser = ConfigParser(os.environ)
parser.read(os.path.join(conf_dir, 'neural_network.ini'))
# AdamOptimizer
beta1 = parser.getfloat('optimizer', 'beta1')
beta2 = parser.getfloat('optimizer', 'beta2')
epsilon = parser.getfloat('optimizer', 'epsilon')
learning_rate = parser.getfloat('optimizer', 'learning_rate')
def variable_on_cpu(name, shape, initializer):
"""
Next we concern ourselves with graph creation.
However, before we do so we must introduce a utility function ``variable_on_cpu()``
used to create a variable in CPU memory.
"""
# Use the /cpu:0 device for scoped operations
with tf.device('/cpu:0'):
        # Create the variable, or return the existing variable with this name
var = tf.get_variable(name=name, shape=shape, initializer=initializer)
return var
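# Hedged usage sketch (name, shape and initializer are assumptions; the
# initializer API may differ between TensorFlow 1.x releases):
#
#     h1 = variable_on_cpu('h1', [494, 1024],
#                          tf.random_normal_initializer(stddev=0.1))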
def create_optimizer():
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1,
beta2=beta2,
epsilon=epsilon)
return optimizer
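# Hedged example (``loss`` is an assumption): the optimizer built above is
# typically used to derive the training op.
#
#     train_op = create_optimizer().minimize(loss)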
| mit |
akloster/bokeh | bokeh/properties.py | 20 | 42601 | """ Properties are objects that can be assigned as class level
attributes on Bokeh models, to provide automatic serialization
and validation.
For example, the following defines a model that has integer,
string, and list[float] properties::
class Model(HasProps):
foo = Int
bar = String
baz = List(Float)
The properties of this class can be initialized by specifying
keyword arguments to the initializer::
m = Model(foo=10, bar="a str", baz=[1,2,3,4])
But also by setting the attributes on an instance::
m.foo = 20
Attempts to set a property to a value of the wrong type will
result in a ``ValueError`` exception::
>>> m.foo = 2.3
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 585, in __setattr__
super(HasProps, self).__setattr__(name, value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 159, in __set__
raise e
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 152, in __set__
self.validate(value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 707, in validate
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
ValueError: expected a value of type int8, int16, int32, int64 or int, got 2.3 of type float
Additionally, properties know how to serialize themselves,
to be understood by BokehJS.
"""
from __future__ import absolute_import, print_function
import re
import types
import difflib
import datetime
import dateutil.parser
import collections
from importlib import import_module
from copy import copy
from warnings import warn
import inspect
import logging
logger = logging.getLogger(__name__)
from six import integer_types, string_types, add_metaclass, iteritems
import numpy as np
from . import enums
from .util.string import nice_join
def field(name):
    ''' Convenience function to explicitly mark a field specification for
a Bokeh model property.
Args:
name (str) : name of a data source field to reference for a property.
Returns:
dict : `{"field": name}`
Note:
This function is included for completeness. String values for
property specifications are by default interpreted as field names.
'''
return dict(field=name)
def value(val):
    ''' Convenience function to explicitly mark a value specification for
a Bokeh model property.
Args:
val (any) : a fixed value to specify for a property.
Returns:
dict : `{"value": name}`
Note:
String values for property specifications are by default interpreted
as field names. This function is especially useful when you want to
specify a fixed value with text properties.
Example:
.. code-block:: python
# The following will take text values to render from a data source
# column "text_column", but use a fixed value "12pt" for font size
p.text("x", "y", text="text_column",
text_font_size=value("12pt"), source=source)
'''
return dict(value=val)
bokeh_integer_types = (np.int8, np.int16, np.int32, np.int64) + integer_types
# used to indicate properties that are not set (vs null, None, etc)
class _NotSet(object):
pass
class DeserializationError(Exception):
pass
class Property(object):
""" Base class for all type properties. """
def __init__(self, default=None, help=None):
""" This is how the descriptor is created in the class declaration """
if isinstance(default, types.FunctionType): # aka. lazy value
self.validate(default())
else:
self.validate(default)
self._default = default
self.__doc__ = help
self.alternatives = []
# This gets set by the class decorator at class creation time
self.name = "unnamed"
def __str__(self):
return self.__class__.__name__
@property
def _name(self):
return "_" + self.name
@property
def default(self):
if not isinstance(self._default, types.FunctionType):
return copy(self._default)
else:
value = self._default()
self.validate(value)
return value
@classmethod
def autocreate(cls, name=None):
""" Called by the metaclass to create a
new instance of this descriptor
if the user just assigned it to a property without trailing
parentheses.
"""
return cls()
def matches(self, new, old):
# XXX: originally this code warned about not being able to compare values, but that
        # doesn't make sense, because most comparisons involving numpy arrays
        # raise a ValueError, so such a warning would have been unavoidable.
try:
if new is None or old is None:
return new is old # XXX: silence FutureWarning from NumPy
else:
return new == old
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
logger.debug("could not compare %s and %s for property %s (Reason: %s)", new, old, self.name, e)
return False
def from_json(self, json, models=None):
return json
def transform(self, value):
return value
def validate(self, value):
pass
def is_valid(self, value):
try:
self.validate(value)
except ValueError:
return False
else:
return True
def _get(self, obj):
if not hasattr(obj, self._name):
setattr(obj, self._name, self.default)
return getattr(obj, self._name)
def __get__(self, obj, owner=None):
if obj is not None:
return self._get(obj)
elif owner is not None:
return self
else:
raise ValueError("both 'obj' and 'owner' are None, don't know what to do")
def __set__(self, obj, value):
try:
self.validate(value)
except ValueError as e:
for tp, converter in self.alternatives:
if tp.is_valid(value):
value = converter(value)
break
else:
raise e
else:
value = self.transform(value)
old = self.__get__(obj)
obj._changed_vars.add(self.name)
if self._name in obj.__dict__ and self.matches(value, old):
return
setattr(obj, self._name, value)
obj._dirty = True
if hasattr(obj, '_trigger'):
if hasattr(obj, '_block_callbacks') and obj._block_callbacks:
obj._callback_queue.append((self.name, old, value))
else:
obj._trigger(self.name, old, value)
def __delete__(self, obj):
if hasattr(obj, self._name):
delattr(obj, self._name)
@property
def has_ref(self):
return False
def accepts(self, tp, converter):
tp = ParameterizedProperty._validate_type_param(tp)
self.alternatives.append((tp, converter))
return self
def __or__(self, other):
return Either(self, other)
class Include(object):
""" Include other properties from mixin Models, with a given prefix. """
def __init__(self, delegate, help="", use_prefix=True):
if not (isinstance(delegate, type) and issubclass(delegate, HasProps)):
raise ValueError("expected a subclass of HasProps, got %r" % delegate)
self.delegate = delegate
self.help = help
self.use_prefix = use_prefix
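# Hedged sketch (class names are assumptions): Include copies the properties
# of a mixin HasProps class into the host class; with use_prefix=True the
# attribute name, minus a trailing "_props", becomes the prefix.
#
#     class LineProps(HasProps):
#         line_color = Color()
#
#     class Glyph(HasProps):
#         border_props = Include(LineProps)   # exposes "border_line_color"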
class MetaHasProps(type):
def __new__(cls, class_name, bases, class_dict):
names = set()
names_with_refs = set()
container_names = set()
# First pre-process to handle all the Includes
includes = {}
removes = set()
for name, prop in class_dict.items():
if not isinstance(prop, Include):
continue
delegate = prop.delegate
if prop.use_prefix:
prefix = re.sub("_props$", "", name) + "_"
else:
prefix = ""
for subpropname in delegate.class_properties(withbases=False):
fullpropname = prefix + subpropname
subprop = delegate.lookup(subpropname)
if isinstance(subprop, Property):
# If it's an actual instance, then we need to make a copy
# so two properties don't write to the same hidden variable
# inside the instance.
subprop = copy(subprop)
if "%s" in prop.help:
doc = prop.help % subpropname.replace('_', ' ')
else:
doc = prop.help
try:
includes[fullpropname] = subprop(help=doc)
except TypeError:
includes[fullpropname] = subprop
subprop.__doc__ = doc
# Remove the name of the Include attribute itself
removes.add(name)
# Update the class dictionary, taking care not to overwrite values
# from the delegates that the subclass may have explicitly defined
for key, val in includes.items():
if key not in class_dict:
class_dict[key] = val
for tmp in removes:
del class_dict[tmp]
dataspecs = {}
units_to_add = {}
for name, prop in class_dict.items():
if isinstance(prop, Property):
prop.name = name
if prop.has_ref:
names_with_refs.add(name)
elif isinstance(prop, ContainerProperty):
container_names.add(name)
names.add(name)
if isinstance(prop, DataSpec):
dataspecs[name] = prop
if hasattr(prop, '_units_type'):
units_to_add[name+"_units"] = prop._units_type
elif isinstance(prop, type) and issubclass(prop, Property):
# Support the user adding a property without using parens,
# i.e. using just the Property subclass instead of an
# instance of the subclass
newprop = prop.autocreate(name=name)
class_dict[name] = newprop
newprop.name = name
names.add(name)
# Process dataspecs
if issubclass(prop, DataSpec):
dataspecs[name] = newprop
for name, prop in units_to_add.items():
prop.name = name
names.add(name)
class_dict[name] = prop
class_dict["__properties__"] = names
class_dict["__properties_with_refs__"] = names_with_refs
class_dict["__container_props__"] = container_names
if dataspecs:
class_dict["_dataspecs"] = dataspecs
return type.__new__(cls, class_name, bases, class_dict)
def accumulate_from_subclasses(cls, propname):
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps):
s.update(getattr(c, propname))
return s
@add_metaclass(MetaHasProps)
class HasProps(object):
def __init__(self, **properties):
super(HasProps, self).__init__()
self._changed_vars = set()
for name, value in properties.items():
setattr(self, name, value)
def __setattr__(self, name, value):
props = sorted(self.properties())
if name.startswith("_") or name in props:
super(HasProps, self).__setattr__(name, value)
else:
matches, text = difflib.get_close_matches(name.lower(), props), "similar"
if not matches:
matches, text = props, "possible"
raise AttributeError("unexpected attribute '%s' to %s, %s attributes are %s" %
(name, self.__class__.__name__, text, nice_join(matches)))
def clone(self):
""" Returns a duplicate of this object with all its properties
set appropriately. Values which are containers are shallow-copied.
"""
return self.__class__(**self.changed_properties_with_values())
@classmethod
def lookup(cls, name):
return getattr(cls, name)
@classmethod
def properties_with_refs(cls):
""" Returns a set of the names of this object's properties that
have references. We traverse the class hierarchy and
pull together the full list of properties.
"""
if not hasattr(cls, "__cached_allprops_with_refs"):
s = accumulate_from_subclasses(cls, "__properties_with_refs__")
cls.__cached_allprops_with_refs = s
return cls.__cached_allprops_with_refs
@classmethod
def properties_containers(cls):
""" Returns a list of properties that are containers
"""
if not hasattr(cls, "__cached_allprops_containers"):
s = accumulate_from_subclasses(cls, "__container_props__")
cls.__cached_allprops_containers = s
return cls.__cached_allprops_containers
@classmethod
def properties(cls):
""" Returns a set of the names of this object's properties. We
traverse the class hierarchy and pull together the full
list of properties.
"""
if not hasattr(cls, "__cached_allprops"):
s = cls.class_properties()
cls.__cached_allprops = s
return cls.__cached_allprops
@classmethod
def dataspecs(cls):
""" Returns a set of the names of this object's dataspecs (and
dataspec subclasses). Traverses the class hierarchy.
"""
if not hasattr(cls, "__cached_dataspecs"):
dataspecs = set()
for c in reversed(inspect.getmro(cls)):
if hasattr(c, "_dataspecs"):
dataspecs.update(c._dataspecs.keys())
cls.__cached_dataspecs = dataspecs
return cls.__cached_dataspecs
@classmethod
def dataspecs_with_refs(cls):
dataspecs = {}
for c in reversed(inspect.getmro(cls)):
if hasattr(c, "_dataspecs"):
dataspecs.update(c._dataspecs)
return dataspecs
def changed_vars(self):
""" Returns which variables changed since the creation of the object,
        or the last call to reset_changed_vars().
"""
return set.union(self._changed_vars, self.properties_with_refs(),
self.properties_containers())
def reset_changed_vars(self):
self._changed_vars = set()
def properties_with_values(self):
return dict([ (attr, getattr(self, attr)) for attr in self.properties() ])
def changed_properties(self):
return self.changed_vars()
def changed_properties_with_values(self):
return dict([ (attr, getattr(self, attr)) for attr in self.changed_properties() ])
@classmethod
def class_properties(cls, withbases=True):
if withbases:
return accumulate_from_subclasses(cls, "__properties__")
else:
return set(cls.__properties__)
def set(self, **kwargs):
""" Sets a number of properties at once """
for kw in kwargs:
setattr(self, kw, kwargs[kw])
def pprint_props(self, indent=0):
""" Prints the properties of this object, nicely formatted """
for key, value in self.properties_with_values().items():
print("%s%s: %r" % (" "*indent, key, value))
class PrimitiveProperty(Property):
""" A base class for simple property types. Subclasses should
define a class attribute ``_underlying_type`` that is a tuple
of acceptable type values for the property.
"""
_underlying_type = None
def validate(self, value):
super(PrimitiveProperty, self).validate(value)
if not (value is None or isinstance(value, self._underlying_type)):
raise ValueError("expected a value of type %s, got %s of type %s" %
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
def from_json(self, json, models=None):
if json is None or isinstance(json, self._underlying_type):
return json
else:
expected = nice_join([ cls.__name__ for cls in self._underlying_type ])
raise DeserializationError("%s expected %s, got %s" % (self, expected, json))
class Bool(PrimitiveProperty):
""" Boolean type property. """
_underlying_type = (bool,)
class Int(PrimitiveProperty):
""" Signed integer type property. """
_underlying_type = bokeh_integer_types
class Float(PrimitiveProperty):
""" Floating point type property. """
_underlying_type = (float, ) + bokeh_integer_types
class Complex(PrimitiveProperty):
""" Complex floating point type property. """
_underlying_type = (complex, float) + bokeh_integer_types
class String(PrimitiveProperty):
""" String type property. """
_underlying_type = string_types
class Regex(String):
""" Regex type property validates that text values match the
given regular expression.
"""
def __init__(self, regex, default=None, help=None):
self.regex = re.compile(regex)
super(Regex, self).__init__(default=default, help=help)
def validate(self, value):
super(Regex, self).validate(value)
if not (value is None or self.regex.match(value) is not None):
raise ValueError("expected a string matching %r pattern, got %r" % (self.regex.pattern, value))
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.regex.pattern)
class JSON(String):
""" JSON type property validates that text values are valid JSON.
.. note::
The string is transmitted and received by BokehJS as a *string*
containing JSON content. i.e., you must use ``JSON.parse`` to unpack
the value into a JavaScript hash.
"""
def validate(self, value):
super(JSON, self).validate(value)
if value is None: return
try:
import json
json.loads(value)
except ValueError:
raise ValueError("expected JSON text, got %r" % value)
class ParameterizedProperty(Property):
""" Base class for Properties that have type parameters, e.g.
``List(String)``.
"""
@staticmethod
def _validate_type_param(type_param):
if isinstance(type_param, type):
if issubclass(type_param, Property):
return type_param()
else:
type_param = type_param.__name__
elif isinstance(type_param, Property):
return type_param
raise ValueError("expected a property as type parameter, got %s" % type_param)
@property
def type_params(self):
raise NotImplementedError("abstract method")
@property
def has_ref(self):
return any(type_param.has_ref for type_param in self.type_params)
class ContainerProperty(ParameterizedProperty):
""" Base class for Container-like type properties. """
pass
class Seq(ContainerProperty):
""" Sequence (list, tuple) type property.
"""
def _is_seq(self, value):
return isinstance(value, collections.Container) and not isinstance(value, collections.Mapping)
def _new_instance(self, value):
return value
def __init__(self, item_type, default=None, help=None):
self.item_type = self._validate_type_param(item_type)
super(Seq, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.item_type]
def validate(self, value):
super(Seq, self).validate(value)
if value is not None:
if not (self._is_seq(value) and all(self.item_type.is_valid(item) for item in value)):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.item_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return self._new_instance([ self.item_type.from_json(item, models) for item in json ])
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class List(Seq):
""" Python list type property.
"""
def __init__(self, item_type, default=[], help=None):
# todo: refactor to not use mutable objects as default values.
# Left in place for now because we want to allow None to express
        # optional values. Also in Dict.
super(List, self).__init__(item_type, default=default, help=help)
def _is_seq(self, value):
return isinstance(value, list)
class Array(Seq):
""" NumPy array type property.
"""
def _is_seq(self, value):
import numpy as np
return isinstance(value, np.ndarray)
def _new_instance(self, value):
return np.array(value)
class Dict(ContainerProperty):
""" Python dict type property.
If a default value is passed in, then a shallow copy of it will be
used for each new use of this property.
"""
def __init__(self, keys_type, values_type, default={}, help=None):
self.keys_type = self._validate_type_param(keys_type)
self.values_type = self._validate_type_param(values_type)
super(Dict, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.keys_type, self.values_type]
def validate(self, value):
super(Dict, self).validate(value)
if value is not None:
if not (isinstance(value, dict) and \
all(self.keys_type.is_valid(key) and self.values_type.is_valid(val) for key, val in iteritems(value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.keys_type, self.values_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
return { self.keys_type.from_json(key, models): self.values_type.from_json(value, models) for key, value in iteritems(json) }
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
class Tuple(ContainerProperty):
""" Tuple type property. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
super(Tuple, self).__init__(default=kwargs.get("default"), help=kwargs.get("help"))
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Tuple, self).validate(value)
if value is not None:
if not (isinstance(value, (tuple, list)) and len(self.type_params) == len(value) and \
all(type_param.is_valid(item) for type_param, item in zip(self.type_params, value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return tuple(type_param.from_json(item, models) for type_param, item in zip(self.type_params, json))
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class Instance(Property):
""" Instance type property, for references to other Models in the object
graph.
"""
def __init__(self, instance_type, default=None, help=None):
if not isinstance(instance_type, (type,) + string_types):
raise ValueError("expected a type or string, got %s" % instance_type)
if isinstance(instance_type, type) and not issubclass(instance_type, HasProps):
raise ValueError("expected a subclass of HasProps, got %s" % instance_type)
self._instance_type = instance_type
super(Instance, self).__init__(default=default, help=help)
@property
def instance_type(self):
if isinstance(self._instance_type, str):
module, name = self._instance_type.rsplit(".", 1)
self._instance_type = getattr(import_module(module, "bokeh"), name)
return self._instance_type
@property
def has_ref(self):
return True
def validate(self, value):
super(Instance, self).validate(value)
if value is not None:
if not isinstance(value, self.instance_type):
raise ValueError("expected an instance of type %s, got %s of type %s" %
(self.instance_type.__name__, value, type(value).__name__))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.instance_type.__name__)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
from .plot_object import PlotObject
if issubclass(self.instance_type, PlotObject):
if models is None:
raise DeserializationError("%s can't deserialize without models" % self)
else:
model = models.get(json["id"])
if model is not None:
return model
else:
raise DeserializationError("%s failed to deserilize reference to %s" % (self, json))
else:
attrs = {}
for name, value in iteritems(json):
prop = self.instance_type.lookup(name)
attrs[name] = prop.from_json(value, models)
# XXX: this doesn't work when Instance(Superclass) := Subclass()
# Serialization dict must carry type information to resolve this.
return self.instance_type(**attrs)
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
class This(Property):
""" A reference to an instance of the class being defined. """
pass
# Fake types, ABCs
class Any(Property):
""" Any type property accepts any values. """
pass
class Function(Property):
""" Function type property. """
pass
class Event(Property):
""" Event type property. """
pass
class Interval(ParameterizedProperty):
    ''' Interval type property: ensures values are contained inside the given bounds. '''
def __init__(self, interval_type, start, end, default=None, help=None):
self.interval_type = self._validate_type_param(interval_type)
self.interval_type.validate(start)
self.interval_type.validate(end)
self.start = start
self.end = end
super(Interval, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.interval_type]
def validate(self, value):
super(Interval, self).validate(value)
if not (value is None or self.interval_type.is_valid(value) and value >= self.start and value <= self.end):
raise ValueError("expected a value of type %s in range [%s, %s], got %r" % (self.interval_type, self.start, self.end, value))
def __str__(self):
return "%s(%s, %r, %r)" % (self.__class__.__name__, self.interval_type, self.start, self.end)
class Byte(Interval):
''' Byte type property. '''
def __init__(self, default=0, help=None):
super(Byte, self).__init__(Int, 0, 255, default=default, help=help)
class Either(ParameterizedProperty):
""" Takes a list of valid properties and validates against them in succession. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
default = kwargs.get("default", self._type_params[0].default)
help = kwargs.get("help")
super(Either, self).__init__(default=default, help=help)
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Either, self).validate(value)
if not (value is None or any(param.is_valid(value) for param in self.type_params)):
raise ValueError("expected an element of either %s, got %r" % (nice_join(self.type_params), value))
def transform(self, value):
for param in self.type_params:
try:
return param.transform(value)
except ValueError:
pass
raise ValueError("Could not transform %r" % value)
def from_json(self, json, models=None):
for tp in self.type_params:
try:
return tp.from_json(json, models)
except DeserializationError:
pass
else:
raise DeserializationError("%s couldn't deserialize %s" % (self, json))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def __or__(self, other):
        # note: Property stores its help text on __doc__; there is no .help attribute
        return self.__class__(*(self.type_params + [other]), default=self._default, help=self.__doc__)
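# Hedged illustration: Either accepts a value as soon as any alternative
# validates it, trying each type parameter in order.
#
#     prop = Either(Int, String)
#     prop.is_valid(10)      # True (Int)
#     prop.is_valid("ten")   # True (String)
#     prop.is_valid(1.5)     # False: no alternative matches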
class Enum(Property):
""" An Enum with a list of allowed values. The first value in the list is
the default value, unless a default is provided with the "default" keyword
argument.
"""
def __init__(self, enum, *values, **kwargs):
if not (not values and isinstance(enum, enums.Enumeration)):
enum = enums.enumeration(enum, *values)
self.allowed_values = enum._values
default = kwargs.get("default", enum._default)
help = kwargs.get("help")
super(Enum, self).__init__(default=default, help=help)
def validate(self, value):
super(Enum, self).validate(value)
if not (value is None or value in self.allowed_values):
raise ValueError("invalid value for %s: %r; allowed values are %s" % (self.name, value, nice_join(self.allowed_values)))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, self.allowed_values)))
class Auto(Enum):
def __init__(self):
super(Auto, self).__init__("auto")
def __str__(self):
return self.__class__.__name__
# Properties useful for defining visual attributes
class Color(Either):
""" Accepts color definition in a variety of ways, and produces an
appropriate serialization of its value for whatever backend.
For colors, because we support named colors and hex values prefaced
with a "#", when we are handed a string value, there is a little
interpretation: if the value is one of the 147 SVG named colors or
it starts with a "#", then it is interpreted as a value.
If a 3-tuple is provided, then it is treated as an RGB (0..255).
If a 4-tuple is provided, then it is treated as an RGBa (0..255), with
alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)
"""
def __init__(self, default=None, help=None):
types = (Enum(enums.NamedColor),
Regex("^#[0-9a-fA-F]{6}$"),
Tuple(Byte, Byte, Byte),
Tuple(Byte, Byte, Byte, Percent))
super(Color, self).__init__(*types, default=default, help=help)
def __str__(self):
return self.__class__.__name__
class Align(Property):
pass
class DashPattern(Either):
""" Dash type property.
Express patterns that describe line dashes. ``DashPattern`` values
can be specified in a variety of ways:
* An enum: "solid", "dashed", "dotted", "dotdash", "dashdot"
* a tuple or list of integers in the `HTML5 Canvas dash specification style`_.
Note that if the list of integers has an odd number of elements, then
it is duplicated, and that duplicated list becomes the new dash list.
To indicate that dashing is turned off (solid lines), specify the empty
list [].
.. _HTML5 Canvas dash specification style: http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/#dash-list
"""
_dash_patterns = {
"solid": [],
"dashed": [6],
"dotted": [2,4],
"dotdash": [2,4,6,4],
"dashdot": [6,4,2,4],
}
def __init__(self, default=[], help=None):
types = Enum(enums.DashPattern), Regex(r"^(\d+(\s+\d+)*)?$"), Seq(Int)
super(DashPattern, self).__init__(*types, default=default, help=help)
def transform(self, value):
value = super(DashPattern, self).transform(value)
if isinstance(value, string_types):
try:
return self._dash_patterns[value]
except KeyError:
return [int(x) for x in value.split()]
else:
return value
def __str__(self):
return self.__class__.__name__
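# How DashPattern.transform() normalizes the three accepted spellings
# (hypothetical sketch, kept as comments):
#
#   dash = DashPattern()
#   dash.transform("dotted")   # -> [2, 4], via the _dash_patterns table
#   dash.transform("2 4 6")    # -> [2, 4, 6], parsed from the Regex form
#   dash.transform([6, 3])     # -> [6, 3], sequences pass through unchanged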
class Size(Float):
""" Size type property.
.. note::
        ``Size`` behaves like an unsigned number: any non-negative value
        (integer or float) is accepted.
"""
def validate(self, value):
super(Size, self).validate(value)
if not (value is None or 0.0 <= value):
raise ValueError("expected a non-negative number, got %r" % value)
class Percent(Float):
""" Percentage type property.
    Percents are useful for specifying alphas, coverage, and extents; they
    are more semantically meaningful than a bare Float(0..1).
"""
def validate(self, value):
super(Percent, self).validate(value)
if not (value is None or 0.0 <= value <= 1.0):
raise ValueError("expected a value in range [0, 1], got %r" % value)
class Angle(Float):
""" Angle type property. """
pass
class Date(Property):
""" Date (not datetime) type property.
"""
def __init__(self, default=datetime.date.today(), help=None):
super(Date, self).__init__(default=default, help=help)
def validate(self, value):
super(Date, self).validate(value)
if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)):
raise ValueError("expected a date, string or timestamp, got %r" % value)
def transform(self, value):
value = super(Date, self).transform(value)
if isinstance(value, (float,) + bokeh_integer_types):
try:
value = datetime.date.fromtimestamp(value)
except ValueError:
value = datetime.date.fromtimestamp(value/1000)
elif isinstance(value, string_types):
value = dateutil.parser.parse(value).date()
return value
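# Date.transform() coerces the other accepted input types to datetime.date
# (hypothetical sketch; the last call assumes a platform where the oversized
# timestamp raises ValueError, triggering the millisecond fallback above):
#
#   d = Date()
#   d.transform("2015-03-28")       # parsed with dateutil -> datetime.date
#   d.transform(1427500800)         # POSIX seconds -> datetime.date
#   d.transform(1427500800000)      # POSIX milliseconds -> the same date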
class Datetime(Property):
""" Datetime type property.
"""
def __init__(self, default=datetime.date.today(), help=None):
super(Datetime, self).__init__(default=default, help=help)
def validate(self, value):
super(Datetime, self).validate(value)
        if isinstance(value, (datetime.datetime, datetime.date, np.datetime64)):
return
try:
import pandas
            if isinstance(value, pandas.Timestamp):
return
except ImportError:
pass
raise ValueError("Expected a datetime instance, got %r" % value)
def transform(self, value):
value = super(Datetime, self).transform(value)
return value
# Handled by serialization in protocol.py for now
class RelativeDelta(Dict):
""" RelativeDelta type property for time deltas.
"""
def __init__(self, default={}, help=None):
keys = Enum("years", "months", "days", "hours", "minutes", "seconds", "microseconds")
values = Int
super(RelativeDelta, self).__init__(keys, values, default=default, help=help)
def __str__(self):
return self.__class__.__name__
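# RelativeDelta is just a Dict constrained to dateutil-style keyword names
# (hypothetical sketch; assumes Dict validates keys against the Enum above):
#
#   delta = RelativeDelta()
#   delta.validate({"days": 2, "hours": 12})   # passes
#   delta.validate({"weeks": 1})               # rejected: "weeks" not allowed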
class DataSpec(Either):
def __init__(self, typ, default, help=None):
super(DataSpec, self).__init__(String, Dict(String, Either(String, typ)), typ, default=default, help=help)
self._type = self._validate_type_param(typ)
def to_dict(self, obj):
val = getattr(obj, self._name, self.default)
# Check for None value
if val is None:
return dict(value=None)
# Check for spec type value
try:
self._type.validate(val)
return dict(value=val)
except ValueError:
pass
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
def __str__(self):
val = getattr(self, self._name, self.default)
return "%s(%r)" % (self.__class__.__name__, val)
class NumberSpec(DataSpec):
def __init__(self, default, help=None):
super(NumberSpec, self).__init__(Float, default=default, help=help)
class StringSpec(DataSpec):
def __init__(self, default, help=None):
super(StringSpec, self).__init__(List(String), default=default, help=help)
def __set__(self, obj, value):
if isinstance(value, list):
if len(value) != 1:
raise TypeError("StringSpec convenience list values must have length 1")
value = dict(value=value[0])
super(StringSpec, self).__set__(obj, value)
class FontSizeSpec(DataSpec):
def __init__(self, default, help=None):
super(FontSizeSpec, self).__init__(List(String), default=default, help=help)
def __set__(self, obj, value):
if isinstance(value, string_types):
warn('Setting a fixed font size value as a string %r is deprecated, '
'set with value(%r) or [%r] instead' % (value, value, value),
DeprecationWarning, stacklevel=2)
if len(value) > 0 and value[0].isdigit():
value = dict(value=value)
super(FontSizeSpec, self).__set__(obj, value)
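# What the deprecation shim above does with bare strings (hypothetical
# attribute; kept as comments):
#
#   obj.text_font_size = "12pt"     # warns, then stores {"value": "12pt"}
#   obj.text_font_size = "size_col" # warns, but stays a string (no leading
#                                   # digit), so it serializes as a field
#   obj.text_font_size = ["12pt"]   # preferred list form, no warning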
class UnitsSpec(NumberSpec):
def __init__(self, default, units_type, units_default, help=None):
super(UnitsSpec, self).__init__(default=default, help=help)
self._units_type = self._validate_type_param(units_type)
self._units_type.validate(units_default)
self._units_type._default = units_default
def to_dict(self, obj):
d = super(UnitsSpec, self).to_dict(obj)
d["units"] = getattr(obj, self.name+"_units")
return d
def __set__(self, obj, value):
if isinstance(value, dict):
units = value.pop("units", None)
if units: setattr(obj, self.name+"_units", units)
super(UnitsSpec, self).__set__(obj, value)
def __str__(self):
val = getattr(self, self._name, self.default)
return "%s(%r, units_default=%r)" % (self.__class__.__name__, val, self._units_type._default)
class AngleSpec(UnitsSpec):
def __init__(self, default, units_default="rad", help=None):
super(AngleSpec, self).__init__(default=default, units_type=Enum(enums.AngleUnits), units_default=units_default, help=help)
class DistanceSpec(UnitsSpec):
def __init__(self, default, units_default="data", help=None):
super(DistanceSpec, self).__init__(default=default, units_type=Enum(enums.SpatialUnits), units_default=units_default, help=help)
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(DistanceSpec, self).__set__(obj, value)
class ScreenDistanceSpec(NumberSpec):
def to_dict(self, obj):
d = super(ScreenDistanceSpec, self).to_dict(obj)
d["units"] = "screen"
return d
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(ScreenDistanceSpec, self).__set__(obj, value)
class DataDistanceSpec(NumberSpec):
def to_dict(self, obj):
        d = super(DataDistanceSpec, self).to_dict(obj)  # was ScreenDistanceSpec: wrong class passed to super()
d["units"] = "data"
return d
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(DataDistanceSpec, self).__set__(obj, value)
class ColorSpec(DataSpec):
def __init__(self, default, help=None):
super(ColorSpec, self).__init__(Color, default=default, help=help)
@classmethod
def isconst(cls, arg):
""" Returns True if the argument is a literal color. Check for a
well-formed hexadecimal color value.
"""
return isinstance(arg, string_types) and \
((len(arg) == 7 and arg[0] == "#") or arg in enums.NamedColor._values)
@classmethod
def is_color_tuple(cls, val):
return isinstance(val, tuple) and len(val) in (3, 4)
@classmethod
def format_tuple(cls, colortuple):
if len(colortuple) == 3:
return "rgb%r" % (colortuple,)
else:
return "rgba%r" % (colortuple,)
def to_dict(self, obj):
val = getattr(obj, self._name, self.default)
if val is None:
return dict(value=None)
# Check for hexadecimal or named color
if self.isconst(val):
return dict(value=val)
# Check for RGB or RGBa tuple
if isinstance(val, tuple):
return dict(value=self.format_tuple(val))
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
def validate(self, value):
try:
return super(ColorSpec, self).validate(value)
except ValueError as e:
# Check for tuple input if not yet a valid input type
if self.is_color_tuple(value):
return True
else:
raise e
def transform(self, value):
# Make sure that any tuple has either three integers, or three integers and one float
if isinstance(value, tuple):
value = tuple(int(v) if i < 3 else v for i, v in enumerate(value))
return value
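# ColorSpec.to_dict() serialization sketch (hypothetical values):
#
#   "red"             -> {"value": "red"}        # named-color constant
#   "#ff0000"         -> {"value": "#ff0000"}    # hex constant
#   (255, 0, 0)       -> {"value": "rgb(255, 0, 0)"}
#   (255, 0, 0, 0.5)  -> {"value": "rgba(255, 0, 0, 0.5)"}
#   "fill_col"        -> {"field": "fill_col"}   # any other string: field name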
| bsd-3-clause |
40223234/2015cdb_g1_0134 | static/Brython3.1.1-20150328-091302/Lib/test/test_re.py | 718 | 56009 | # FIXME: brython: implement test.support
#from test.support import verbose, run_unittest, gc_collect, bigmemtest, _2G, \
# cpython_only
verbose = True
# FIXME: brython: Not used in this module ?
#import io
import re
# FIXME: brython: implement re.Scanner
#from re import Scanner
import sre_constants
import sys
import string
import traceback
# FIXME: brython: implement _weakref
#from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefully modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
# FIXME: brython: implement test.support
# def test_keep_buffer(self):
# # See bug 14212
# b = bytearray(b'x')
# it = re.finditer(b'a', b)
# with self.assertRaises(BufferError):
# b.extend(b'x'*400)
# list(it)
# del it
# gc_collect()
# b.extend(b'x'*400)
# FIXME: brython: implement _weakref
# def test_weakref(self):
# s = 'QabbbcR'
# x = re.compile('ab+c')
# y = proxy(x)
# self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
def test_basic_re_sub(self):
self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
'9.3 -3 24x100y')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
'9.3 -3 23x99y')
self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
'\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_bug_1661(self):
# Verify that flags do not get silently ignored with compiled patterns
pattern = re.compile('.')
self.assertRaises(ValueError, re.match, pattern, 'A', re.I)
self.assertRaises(ValueError, re.search, pattern, 'A', re.I)
self.assertRaises(ValueError, re.findall, pattern, 'A', re.I)
self.assertRaises(ValueError, re.compile, pattern, re.I)
def test_bug_3629(self):
# A regex that triggered a bug in the sre-code validator
re.compile("(?P<quote>)(?(quote))")
def test_sub_template_numeric_escape(self):
# bug 776311 and friends
self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
# in python2.3 (etc), these loop endlessly in sre_parser.py
self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
'xz8')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
def test_bug_462270(self):
# Test for empty sub() behaviour, see SF bug #462270
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_groups(self):
re.compile('(?P<a>x)(?P=a)(?(a)y)')
re.compile('(?P<a1>x)(?P=a1)(?(a1)y)')
self.assertRaises(re.error, re.compile, '(?P<a>)(?P<a>)')
self.assertRaises(re.error, re.compile, '(?Px)')
self.assertRaises(re.error, re.compile, '(?P=)')
self.assertRaises(re.error, re.compile, '(?P=1)')
self.assertRaises(re.error, re.compile, '(?P=a)')
self.assertRaises(re.error, re.compile, '(?P=a1)')
self.assertRaises(re.error, re.compile, '(?P=a.)')
self.assertRaises(re.error, re.compile, '(?P<)')
self.assertRaises(re.error, re.compile, '(?P<>)')
self.assertRaises(re.error, re.compile, '(?P<1>)')
self.assertRaises(re.error, re.compile, '(?P<a.>)')
self.assertRaises(re.error, re.compile, '(?())')
self.assertRaises(re.error, re.compile, '(?(a))')
self.assertRaises(re.error, re.compile, '(?(1a))')
self.assertRaises(re.error, re.compile, '(?(a.))')
# New valid/invalid identifiers in Python 3
re.compile('(?P<µ>x)(?P=µ)(?(µ)y)')
re.compile('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)(?P=𝔘𝔫𝔦𝔠𝔬𝔡𝔢)(?(𝔘𝔫𝔦𝔠𝔬𝔡𝔢)y)')
self.assertRaises(re.error, re.compile, '(?P<©>x)')
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx')
# New valid/invalid identifiers in Python 3
self.assertEqual(re.sub('(?P<µ>x)', r'\g<µ>', 'xx'), 'xx')
self.assertEqual(re.sub('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)', r'\g<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>', 'xx'), 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', r'\g<©>', 'xx')
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0))
self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
def test_re_split(self):
self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:*)", ":a:b::c"),
['', ':', 'a', ':', 'b', '::', 'c'])
self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:)*", ":a:b::c"),
['', ':', 'a', ':', 'b', ':', 'c'])
self.assertEqual(re.split("([b:]+)", ":a:b::c"),
['', ':', 'a', ':b::', 'c'])
self.assertEqual(re.split("(b)|(:+)", ":a:b::c"),
['', None, ':', 'a', None, ':', '', 'b', None, '',
None, '::', 'c'])
self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
['', 'a', '', '', 'c'])
def test_qualified_re_split(self):
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
self.assertEqual(re.split("(:)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
self.assertEqual(re.split("(:*)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
def test_re_findall(self):
self.assertEqual(re.findall(":+", "abc"), [])
self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""),
(":", ":"),
(":", "::")])
def test_bug_117612(self):
self.assertEqual(re.findall(r"(a|(b))", "aba"),
[("a", ""),("b", "b"),("a", "")])
def test_re_match(self):
self.assertEqual(re.match('a', 'a').groups(), ())
self.assertEqual(re.match('(a)', 'a').groups(), ('a',))
self.assertEqual(re.match(r'(a)', 'a').group(0), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
pat = re.compile('((a)|(b))(c)?')
self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None))
self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None))
self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c'))
self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c'))
self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c'))
# A single group
m = re.match('(a)', 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(1), 'a')
self.assertEqual(m.group(1, 1), ('a', 'a'))
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'),
(None, 'b', None))
self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
def test_re_groupref_exists(self):
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
('(', 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None)
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None)
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
('a', 'b'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
('a', ''))
# Tests for bug #1177831: exercise groups other than the first group
p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
self.assertEqual(p.match('abc').groups(),
('a', 'b', 'c'))
self.assertEqual(p.match('ad').groups(),
('a', None, 'd'))
self.assertEqual(p.match('abd'), None)
self.assertEqual(p.match('ac'), None)
def test_re_groupref(self):
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(),
('|', 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None)
self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(),
('a', 'a'))
self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(),
(None, None))
def test_groupdict(self):
self.assertEqual(re.match('(?P<first>first) (?P<second>second)',
'first second').groupdict(),
{'first':'first', 'second':'second'})
def test_expand(self):
self.assertEqual(re.match("(?P<first>first) (?P<second>second)",
"first second")
.expand(r"\2 \1 \g<second> \g<first>"),
"second first second first")
def test_repeat_minmax(self):
self.assertEqual(re.match("^(\w){1}$", "abc"), None)
self.assertEqual(re.match("^(\w){1}?$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None)
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^x{1}$", "xxx"), None)
self.assertEqual(re.match("^x{1}?$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertEqual(re.match("^x{}$", "xxx"), None)
self.assertNotEqual(re.match("^x{}$", "x{}"), None)
def test_getattr(self):
self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)")
self.assertEqual(re.compile("(?i)(a)(b)").flags, re.I | re.U)
self.assertEqual(re.compile("(?i)(a)(b)").groups, 2)
self.assertEqual(re.compile("(?i)(a)(b)").groupindex, {})
self.assertEqual(re.compile("(?i)(?P<first>a)(?P<other>b)").groupindex,
{'first': 1, 'other': 2})
self.assertEqual(re.match("(a)", "a").pos, 0)
self.assertEqual(re.match("(a)", "a").endpos, 1)
self.assertEqual(re.match("(a)", "a").string, "a")
self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1)))
self.assertNotEqual(re.match("(a)", "a").re, None)
def test_special_escapes(self):
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a").group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.LOCALE).group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
def test_string_boundaries(self):
# See http://bugs.python.org/issue10713
self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1),
"abc")
# There's a word boundary at the start of a string.
self.assertTrue(re.match(r"\b", "abc"))
# A non-empty string includes a non-boundary zero-length match.
self.assertTrue(re.search(r"\B", "abc"))
# There is no non-boundary match at the start of a string.
self.assertFalse(re.match(r"\B", "abc"))
# However, an empty string contains no word boundaries, and also no
# non-boundaries.
self.assertEqual(re.search(r"\B", ""), None)
# This one is questionable and different from the perlre behaviour,
# but describes current behavior.
self.assertEqual(re.search(r"\b", ""), None)
# A single word-character string has two boundaries, but no
# non-boundary gaps.
self.assertEqual(len(re.findall(r"\b", "a")), 2)
self.assertEqual(len(re.findall(r"\B", "a")), 0)
# If there are no words, there are no boundaries
self.assertEqual(len(re.findall(r"\b", " ")), 0)
self.assertEqual(len(re.findall(r"\b", " ")), 0)
# Can match around the whitespace.
self.assertEqual(len(re.findall(r"\B", " ")), 2)
def test_bigcharset(self):
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222").group(1), "\u2222")
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222", re.UNICODE).group(1), "\u2222")
def test_big_codesize(self):
# Issue #1160
r = re.compile('|'.join(('%d'%x for x in range(10000))))
self.assertIsNotNone(r.match('1000'))
self.assertIsNotNone(r.match('9999'))
def test_anyall(self):
self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
"a\nb")
self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0),
"a\n\nb")
def test_non_consuming(self):
self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a")
def test_ignore_case(self):
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa")
self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
def test_category(self):
self.assertEqual(re.match(r"(\s)", " ").group(1), " ")
def test_getlower(self):
import _sre
self.assertEqual(_sre.getlower(ord('A'), 0), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
def test_not_literal(self):
self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
def test_search_coverage(self):
self.assertEqual(re.search("\s(b)", " b").group(1), "b")
self.assertEqual(re.search("a\s", "a ").group(0), "a ")
def assertMatch(self, pattern, text, match=None, span=None,
matcher=re.match):
if match is None and span is None:
# the pattern matches the whole text
match = text
span = (0, len(text))
elif match is None or span is None:
raise ValueError('If match is not None, span should be specified '
'(and vice versa).')
m = matcher(pattern, text)
self.assertTrue(m)
self.assertEqual(m.group(), match)
self.assertEqual(m.span(), span)
def test_re_escape(self):
alnum_chars = string.ascii_letters + string.digits + '_'
p = ''.join(chr(i) for i in range(256))
for c in p:
if c in alnum_chars:
self.assertEqual(re.escape(c), c)
elif c == '\x00':
self.assertEqual(re.escape(c), '\\000')
else:
self.assertEqual(re.escape(c), '\\' + c)
self.assertMatch(re.escape(c), c)
self.assertMatch(re.escape(p), p)
def test_re_escape_byte(self):
alnum_chars = (string.ascii_letters + string.digits + '_').encode('ascii')
p = bytes(range(256))
for i in p:
b = bytes([i])
if b in alnum_chars:
self.assertEqual(re.escape(b), b)
elif i == 0:
self.assertEqual(re.escape(b), b'\\000')
else:
self.assertEqual(re.escape(b), b'\\' + b)
self.assertMatch(re.escape(b), b)
self.assertMatch(re.escape(p), p)
def test_re_escape_non_ascii(self):
s = 'xxx\u2620\u2620\u2620xxx'
s_escaped = re.escape(s)
self.assertEqual(s_escaped, 'xxx\\\u2620\\\u2620\\\u2620xxx')
self.assertMatch(s_escaped, s)
self.assertMatch('.%s+.' % re.escape('\u2620'), s,
'x\u2620\u2620\u2620x', (2, 7), re.search)
def test_re_escape_non_ascii_bytes(self):
b = 'y\u2620y\u2620y'.encode('utf-8')
b_escaped = re.escape(b)
self.assertEqual(b_escaped, b'y\\\xe2\\\x98\\\xa0y\\\xe2\\\x98\\\xa0y')
self.assertMatch(b_escaped, b)
res = re.findall(re.escape('\u2620'.encode('utf-8')), b)
self.assertEqual(len(res), 2)
def pickle_test(self, pickle):
oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(oldpat)
newpat = pickle.loads(s)
self.assertEqual(oldpat, newpat)
def test_constants(self):
self.assertEqual(re.I, re.IGNORECASE)
self.assertEqual(re.L, re.LOCALE)
self.assertEqual(re.M, re.MULTILINE)
self.assertEqual(re.S, re.DOTALL)
self.assertEqual(re.X, re.VERBOSE)
def test_flags(self):
for flag in [re.I, re.M, re.X, re.S, re.L]:
self.assertNotEqual(re.compile('^pattern$', flag), None)
def test_sre_character_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]:
if i < 256:
self.assertIsNotNone(re.match(r"\%03o" % i, chr(i)))
self.assertIsNotNone(re.match(r"\%03o0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\%03o8" % i, chr(i)+"8"))
self.assertIsNotNone(re.match(r"\x%02x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\x%02x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\x%02xz" % i, chr(i)+"z"))
if i < 0x10000:
self.assertIsNotNone(re.match(r"\u%04x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\u%04x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\u%04xz" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"\U%08x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\U%08x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\U%08xz" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"\0", "\000"))
self.assertIsNotNone(re.match(r"\08", "\0008"))
self.assertIsNotNone(re.match(r"\01", "\001"))
self.assertIsNotNone(re.match(r"\018", "\0018"))
self.assertIsNotNone(re.match(r"\567", chr(0o167)))
self.assertRaises(re.error, re.match, r"\911", "")
self.assertRaises(re.error, re.match, r"\x1", "")
self.assertRaises(re.error, re.match, r"\x1z", "")
self.assertRaises(re.error, re.match, r"\u123", "")
self.assertRaises(re.error, re.match, r"\u123z", "")
self.assertRaises(re.error, re.match, r"\U0001234", "")
self.assertRaises(re.error, re.match, r"\U0001234z", "")
self.assertRaises(re.error, re.match, r"\U00110000", "")
def test_sre_character_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]:
if i < 256:
self.assertIsNotNone(re.match(r"[\%o]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%o8]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o8]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02x0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02xz]" % i, chr(i)))
if i < 0x10000:
self.assertIsNotNone(re.match(r"[\u%04x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\u%04x0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\u%04xz]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\U%08x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\U%08x0]" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"[\U%08xz]" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e"))
self.assertRaises(re.error, re.match, r"[\911]", "")
self.assertRaises(re.error, re.match, r"[\x1z]", "")
self.assertRaises(re.error, re.match, r"[\u123z]", "")
self.assertRaises(re.error, re.match, r"[\U0001234z]", "")
self.assertRaises(re.error, re.match, r"[\U00110000]", "")
def test_sre_byte_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertIsNotNone(re.match((r"\%03o" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0"))
self.assertIsNotNone(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8"))
self.assertIsNotNone(re.match((r"\x%02x" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0"))
self.assertIsNotNone(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z"))
self.assertIsNotNone(re.match(br"\u", b'u'))
self.assertIsNotNone(re.match(br"\U", b'U'))
self.assertIsNotNone(re.match(br"\0", b"\000"))
self.assertIsNotNone(re.match(br"\08", b"\0008"))
self.assertIsNotNone(re.match(br"\01", b"\001"))
self.assertIsNotNone(re.match(br"\018", b"\0018"))
self.assertIsNotNone(re.match(br"\567", bytes([0o167])))
self.assertRaises(re.error, re.match, br"\911", b"")
self.assertRaises(re.error, re.match, br"\x1", b"")
self.assertRaises(re.error, re.match, br"\x1z", b"")
def test_sre_byte_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertIsNotNone(re.match((r"[\%o]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%o8]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o0]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o8]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02x]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02x0]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02xz]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match(br"[\u]", b'u'))
self.assertIsNotNone(re.match(br"[\U]", b'U'))
self.assertRaises(re.error, re.match, br"[\911]", "")
self.assertRaises(re.error, re.match, br"[\x1z]", "")
def test_bug_113254(self):
self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1))
def test_bug_527371(self):
# bug described in patches 527371/672491
self.assertEqual(re.match(r'(a)?a','a').lastindex, None)
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
self.assertEqual(re.match("((a))", "a").lastindex, 1)
def test_bug_545855(self):
# bug 545855 -- This pattern failed to cause a compile error as it
# should, instead provoking a TypeError.
self.assertRaises(re.error, re.compile, 'foo[a-')
def test_bug_418626(self):
        # bugs 418626 et al. -- Testing Greg Chapman's addition of op code
# SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
# pattern '*?' on a long string.
self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001)
self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0),
20003)
self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001)
# non-simple '*?' still used to hit the recursion limit, before the
# non-recursive scheme was implemented.
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
def test_bug_612074(self):
pat="["+re.escape("\u2039")+"]"
self.assertEqual(re.compile(pat) and 1, 1)
def test_stack_overflow(self):
# nasty cases that used to overflow the straightforward recursive
# implementation of repeated groups.
self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x')
self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
def test_unlimited_zero_width_repeat(self):
# Issue #9669
self.assertIsNone(re.match(r'(?:a?)*y', 'z'))
self.assertIsNone(re.match(r'(?:a?)+y', 'z'))
self.assertIsNone(re.match(r'(?:a?){2,}y', 'z'))
self.assertIsNone(re.match(r'(?:a?)*?y', 'z'))
self.assertIsNone(re.match(r'(?:a?)+?y', 'z'))
self.assertIsNone(re.match(r'(?:a?){2,}?y', 'z'))
# def test_scanner(self):
# def s_ident(scanner, token): return token
# def s_operator(scanner, token): return "op%s" % token
# def s_float(scanner, token): return float(token)
# def s_int(scanner, token): return int(token)
#
# scanner = Scanner([
# (r"[a-zA-Z_]\w*", s_ident),
# (r"\d+\.\d*", s_float),
# (r"\d+", s_int),
# (r"=|\+|-|\*|/", s_operator),
# (r"\s+", None),
# ])
#
# self.assertNotEqual(scanner.scanner.scanner("").pattern, None)
#
# self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"),
# (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5,
# 'op+', 'bar'], ''))
def test_bug_448951(self):
# bug 448951 (similar to 429357, but with single char match)
# (Also test greedy matches.)
for op in '','?','*':
self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(),
(None, None))
self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(),
('a:', 'a'))
def test_bug_725106(self):
# capturing groups in alternatives in repeats
self.assertEqual(re.match('^((a)|b)*', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(),
('b', None))
def test_bug_725149(self):
# mark_stack_base restoring before restoring marks
self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(),
('a', None))
self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(),
('a', None, None))
def test_bug_764548(self):
# bug 764548, re.compile() barfs on str/unicode subclasses
class my_unicode(str): pass
pat = re.compile(my_unicode("abc"))
self.assertEqual(pat.match("xyz"), None)
def test_finditer(self):
iter = re.finditer(r":+", "a:b::c:::d")
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", 1, 10)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", pos=1, endpos=10)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", endpos=10, pos=1)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", pos=3, endpos=8)
self.assertEqual([item.group(0) for item in iter],
["::", "::"])
def test_bug_926075(self):
self.assertTrue(re.compile('bug_926075') is not
re.compile(b'bug_926075'))
def test_bug_931848(self):
pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"')
self.assertEqual(re.compile(pattern).split("a.b.c"),
['a','b','c'])
def test_bug_581080(self):
iter = re.finditer(r"\s", "a b")
self.assertEqual(next(iter).span(), (1,2))
self.assertRaises(StopIteration, next, iter)
scanner = re.compile(r"\s").scanner("a b")
self.assertEqual(scanner.search().span(), (1, 2))
self.assertEqual(scanner.search(), None)
def test_bug_817234(self):
iter = re.finditer(r".*", "asdf")
self.assertEqual(next(iter).span(), (0, 4))
self.assertEqual(next(iter).span(), (4, 4))
self.assertRaises(StopIteration, next, iter)
def test_bug_6561(self):
# '\d' should match characters in Unicode category 'Nd'
# (Number, Decimal Digit), but not those in 'Nl' (Number,
# Letter) or 'No' (Number, Other).
decimal_digits = [
'\u0037', # '\N{DIGIT SEVEN}', category 'Nd'
'\u0e58', # '\N{THAI DIGIT SIX}', category 'Nd'
'\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd'
]
for x in decimal_digits:
self.assertEqual(re.match('^\d$', x).group(0), x)
not_decimal_digits = [
'\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl'
'\u3039', # '\N{HANGZHOU NUMERAL TWENTY}', category 'Nl'
'\u2082', # '\N{SUBSCRIPT TWO}', category 'No'
'\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No'
]
for x in not_decimal_digits:
self.assertIsNone(re.match('^\d$', x))
def test_empty_array(self):
        # SF bug 1647541
import array
for typecode in 'bBuhHiIlLfd':
a = array.array(typecode)
self.assertEqual(re.compile(b"bla").match(a), None)
self.assertEqual(re.compile(b"").match(a).groups(), ())
def test_inline_flags(self):
# Bug #1700
        upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Below
        lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Below
p = re.compile(upper_char, re.I | re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile(lower_char, re.I | re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + upper_char, re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + lower_char, re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + upper_char)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + lower_char)
q = p.match(upper_char)
self.assertNotEqual(q, None)
def test_dollar_matches_twice(self):
"$ matches the end of string, and just before the terminating \n"
pattern = re.compile('$')
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
pattern = re.compile('$', re.MULTILINE)
self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' )
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
def test_bytes_str_mixing(self):
# Mixing str and bytes is disallowed
pat = re.compile('.')
bpat = re.compile(b'.')
self.assertRaises(TypeError, pat.match, b'b')
self.assertRaises(TypeError, bpat.match, 'b')
self.assertRaises(TypeError, pat.sub, b'b', 'c')
self.assertRaises(TypeError, pat.sub, 'b', b'c')
self.assertRaises(TypeError, pat.sub, b'b', b'c')
self.assertRaises(TypeError, bpat.sub, b'b', 'c')
self.assertRaises(TypeError, bpat.sub, 'b', b'c')
self.assertRaises(TypeError, bpat.sub, 'b', 'c')
def test_ascii_and_unicode_flag(self):
# String patterns
for flags in (0, re.UNICODE):
pat = re.compile('\xc0', flags | re.IGNORECASE)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\w', flags)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\xc0', re.ASCII | re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\xc0', re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('\w', re.ASCII)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\w')
self.assertEqual(pat.match('\xe0'), None)
# Bytes patterns
for flags in (0, re.ASCII):
pat = re.compile(b'\xc0', re.IGNORECASE)
self.assertEqual(pat.match(b'\xe0'), None)
pat = re.compile(b'\w')
self.assertEqual(pat.match(b'\xe0'), None)
# Incompatibilities
self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, b'(?u)\w')
self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII)
self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII)
self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, '(?au)\w')
def test_bug_6509(self):
# Replacement strings of both types must parse properly.
# all strings
pat = re.compile('a(\w)')
self.assertEqual(pat.sub('b\\1', 'ac'), 'bc')
pat = re.compile('a(.)')
self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234')
pat = re.compile('..')
self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str')
# all bytes
pat = re.compile(b'a(\w)')
self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc')
pat = re.compile(b'a(.)')
self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD')
pat = re.compile(b'..')
self.assertEqual(pat.sub(lambda m: b'bytes', b'a5'), b'bytes')
def test_dealloc(self):
# issue 3299: check for segfault in debug build
import _sre
# the overflow limit is different on wide and narrow builds and it
# depends on the definition of SRE_CODE (see sre.h).
# 2**128 should be big enough to overflow on both. For smaller values
# a RuntimeError is raised instead of OverflowError.
long_overflow = 2**128
self.assertRaises(TypeError, re.finditer, "a", {})
self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow])
self.assertRaises(TypeError, _sre.compile, {}, 0, [])
def test_search_dot_unicode(self):
self.assertIsNotNone(re.search("123.*-", '123abc-'))
self.assertIsNotNone(re.search("123.*-", '123\xe9-'))
self.assertIsNotNone(re.search("123.*-", '123\u20ac-'))
self.assertIsNotNone(re.search("123.*-", '123\U0010ffff-'))
self.assertIsNotNone(re.search("123.*-", '123\xe9\u20ac\U0010ffff-'))
def test_compile(self):
# Test return value when given string and pattern as parameter
pattern = re.compile('random pattern')
self.assertIsInstance(pattern, re._pattern_type)
same_pattern = re.compile(pattern)
self.assertIsInstance(same_pattern, re._pattern_type)
self.assertIs(same_pattern, pattern)
# Test behaviour when not given a string or pattern as parameter
self.assertRaises(TypeError, re.compile, 0)
def test_bug_13899(self):
        # Issue #13899: inside a character class the re pattern r"[\A]"
        # should match a literal "A" instead of matching nothing. Ditto B
        # and Z.
self.assertEqual(re.findall(r'[\A\B\b\C\Z]', 'AB\bCZ'),
['A', 'B', '\b', 'C', 'Z'])
# FIXME: brython: implement test.support
# @bigmemtest(size=_2G, memuse=1)
# def test_large_search(self, size):
# # Issue #10182: indices were 32-bit-truncated.
# s = 'a' * size
# m = re.search('$', s)
# self.assertIsNotNone(m)
# self.assertEqual(m.start(), size)
# self.assertEqual(m.end(), size)
# FIXME: brython: implement test.support
# The huge memuse is because of re.sub() using a list and a join()
# to create the replacement result.
# @bigmemtest(size=_2G, memuse=16 + 2)
# def test_large_subn(self, size):
# # Issue #10182: indices were 32-bit-truncated.
# s = 'a' * size
# r, n = re.subn('', '', s)
# self.assertEqual(r, s)
# self.assertEqual(n, size + 1)
def test_bug_16688(self):
# Issue 16688: Backreferences make case-insensitive regex fail on
# non-ASCII strings.
self.assertEqual(re.findall(r"(?i)(a)\1", "aa \u0100"), ['a'])
self.assertEqual(re.match(r"(?s).{1,3}", "\u0100\u0100").span(), (0, 2))
def test_repeat_minmax_overflow(self):
# Issue #13169
string = "x" * 100000
self.assertEqual(re.match(r".{65535}", string).span(), (0, 65535))
self.assertEqual(re.match(r".{,65535}", string).span(), (0, 65535))
self.assertEqual(re.match(r".{65535,}?", string).span(), (0, 65535))
self.assertEqual(re.match(r".{65536}", string).span(), (0, 65536))
self.assertEqual(re.match(r".{,65536}", string).span(), (0, 65536))
self.assertEqual(re.match(r".{65536,}?", string).span(), (0, 65536))
# 2**128 should be big enough to overflow both SRE_CODE and Py_ssize_t.
self.assertRaises(OverflowError, re.compile, r".{%d}" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{,%d}" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{%d,}?" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{%d,%d}" % (2**129, 2**128))
# FIXME: brython: implement test.support
# @cpython_only
# def test_repeat_minmax_overflow_maxrepeat(self):
# try:
# from _sre import MAXREPEAT
# except ImportError:
# self.skipTest('requires _sre.MAXREPEAT constant')
# string = "x" * 100000
# self.assertIsNone(re.match(r".{%d}" % (MAXREPEAT - 1), string))
# self.assertEqual(re.match(r".{,%d}" % (MAXREPEAT - 1), string).span(),
# (0, 100000))
# self.assertIsNone(re.match(r".{%d,}?" % (MAXREPEAT - 1), string))
# self.assertRaises(OverflowError, re.compile, r".{%d}" % MAXREPEAT)
# self.assertRaises(OverflowError, re.compile, r".{,%d}" % MAXREPEAT)
# self.assertRaises(OverflowError, re.compile, r".{%d,}?" % MAXREPEAT)
def test_backref_group_name_in_exception(self):
# Issue 17341: Poor error message when compiling invalid regex
with self.assertRaisesRegex(sre_constants.error, '<foo>'):
re.compile('(?P=<foo>)')
def test_group_name_in_exception(self):
# Issue 17341: Poor error message when compiling invalid regex
with self.assertRaisesRegex(sre_constants.error, '\?foo'):
re.compile('(?P<?foo>)')
def run_re_tests():
from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR
if verbose:
print('Running re_tests test suite')
else:
# To save time, only run the first and last 10 tests
#tests = tests[:10] + tests[-10:]
pass
for t in tests:
sys.stdout.flush()
pattern = s = outcome = repl = expected = None
if len(t) == 5:
pattern, s, outcome, repl, expected = t
elif len(t) == 3:
pattern, s, outcome = t
else:
raise ValueError('Test tuples should have 3 or 5 fields', t)
try:
obj = re.compile(pattern)
except re.error:
if outcome == SYNTAX_ERROR: pass # Expected a syntax error
else:
print('=== Syntax error:', t)
except KeyboardInterrupt: raise KeyboardInterrupt
except:
print('*** Unexpected error ***', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
try:
result = obj.search(s)
except re.error as msg:
print('=== Unexpected exception', t, repr(msg))
if outcome == SYNTAX_ERROR:
# This should have been a syntax error; forget it.
pass
elif outcome == FAIL:
if result is None: pass # No match, as expected
else: print('=== Succeeded incorrectly', t)
elif outcome == SUCCEED:
if result is not None:
# Matched, as expected, so now we compute the
# result string and compare it to our expected result.
start, end = result.span(0)
vardict={'found': result.group(0),
'groups': result.group(),
'flags': result.re.flags}
for i in range(1, 100):
try:
gi = result.group(i)
# Special hack because else the string concat fails:
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict['g%d' % i] = gi
for i in result.re.groupindex.keys():
try:
gi = result.group(i)
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict[i] = gi
repl = eval(repl, vardict)
if repl != expected:
print('=== grouping error', t, end=' ')
print(repr(repl) + ' should be ' + repr(expected))
else:
print('=== Failed incorrectly', t)
# Try the match with both pattern and string converted to
# bytes, and check that it still succeeds.
try:
bpat = bytes(pattern, "ascii")
bs = bytes(s, "ascii")
except UnicodeEncodeError:
# skip non-ascii tests
pass
else:
try:
bpat = re.compile(bpat)
except Exception:
print('=== Fails on bytes pattern compile', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
bytes_result = bpat.search(bs)
if bytes_result is None:
print('=== Fails on bytes pattern match', t)
# Try the match with the search area limited to the extent
# of the match and see if it still succeeds. \B will
# break (because it won't match at the end or start of a
# string), so we'll ignore patterns that feature it.
if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
and result is not None:
obj = re.compile(pattern)
result = obj.search(s, result.start(0), result.end(0) + 1)
if result is None:
print('=== Failed on range-limited match', t)
# Try the match with IGNORECASE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.IGNORECASE)
result = obj.search(s)
if result is None:
print('=== Fails on case-insensitive match', t)
# Try the match with LOCALE enabled, and check that it
# still succeeds.
if '(?u)' not in pattern:
obj = re.compile(pattern, re.LOCALE)
result = obj.search(s)
if result is None:
print('=== Fails on locale-sensitive match', t)
# Try the match with UNICODE locale enabled, and check
# that it still succeeds.
obj = re.compile(pattern, re.UNICODE)
result = obj.search(s)
if result is None:
print('=== Fails on unicode-sensitive match', t)
def test_main():
# FIXME: brython: implement test.support
# run_unittest(ReTests)
run_re_tests()
if __name__ == "__main__":
test_main()
| gpl-3.0 |
hthompson6/contrail-controller | src/config/utils/service-instance.py | 9 | 9286 | #!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import sys
import errno
import pprint
import subprocess
import time
import argparse
import ConfigParser  # needed below for reading the svc-monitor config file
sys.path.insert(0, os.path.realpath('/usr/lib/python2.7/site-packages'))
sys.path.insert(
0,
os.path.realpath('/usr/lib/python2.7/site-packages/vnc_cfg_api_server/'))
from vnc_api.vnc_api import *
from vnc_api.common import exceptions as vnc_exceptions
import vnc_cfg_api_server
from svc_monitor import svc_monitor
from novaclient import client as nc
from novaclient import exceptions as nc_exc
class ServiceInstanceCmd(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
self._proj_fq_name = [self._args.domain_name, self._args.proj_name]
self._si_fq_name = [self._args.domain_name,
self._args.proj_name,
self._args.instance_name]
self._st_fq_name = [self._args.domain_name, self._args.template_name]
self._domain_fq_name = [self._args.domain_name]
if self._args.left_vn:
self._left_vn_fq_name = [self._args.domain_name,
self._args.proj_name,
self._args.left_vn]
if self._args.right_vn:
self._right_vn_fq_name = [self._args.domain_name,
self._args.proj_name,
self._args.right_vn]
if self._args.mgmt_vn:
self._mgmt_vn_fq_name = [self._args.domain_name,
self._args.proj_name,
self._args.mgmt_vn]
self._novaclient_init()
self._vnc_lib = VncApi('u', 'p',
api_server_host=self._args.api_server_ip,
api_server_port=self._args.api_server_port)
# end __init__
def _parse_args(self, args_str):
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
global_defaults = {
'domain_name': 'default-domain',
'template_name': None,
'instance_name': None,
'proj_name': 'demo',
'mgmt_vn': None,
'left_vn': None,
'right_vn': None,
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
}
if not args.conf_file:
args.conf_file = '/etc/contrail/contrail-svc-monitor.conf'
config = ConfigParser.SafeConfigParser()
ret = config.read([args.conf_file])
if args.conf_file not in ret:
print "Error: Unable to read the config file %s" % args.conf_file
sys.exit(-1)
global_defaults.update(dict(config.items("DEFAULTS")))
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**global_defaults)
subparsers = parser.add_subparsers()
create_parser = subparsers.add_parser('add')
create_parser.add_argument(
"instance_name", help="service instance name")
create_parser.add_argument(
"template_name", help="service template name")
create_parser.add_argument(
"--proj_name", help="name of project [default: demo]")
create_parser.add_argument(
"--mgmt_vn", help="name of management vn [default: none]")
create_parser.add_argument(
"--left_vn", help="name of left vn [default: none]")
create_parser.add_argument(
"--right_vn", help="name of right vn [default: none]")
create_parser.add_argument("--max_instances", type=int, default=1,
help="max instances to launch [default: 1]")
create_parser.add_argument(
"--auto_scale", action="store_true", default=False,
help="enable auto-scale from 1 to max_instances")
create_parser.set_defaults(func=self.create_si)
delete_parser = subparsers.add_parser('del')
delete_parser.add_argument(
"instance_name", help="service instance name")
delete_parser.add_argument(
"template_name", help="service instance name")
delete_parser.add_argument(
"--proj_name", help="name of project [default: demo]")
delete_parser.set_defaults(func=self.delete_si)
list_parser = subparsers.add_parser('list')
list_parser.set_defaults(func=self.list_si)
self._args = parser.parse_args(remaining_argv)
# end _parse_args
def _novaclient_init(self):
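        # NOTE: 'ifmap_server_ip' is not defined in global_defaults above; it
        # is assumed to come from the DEFAULTS section of the config file,
        # whose values are merged into the parser defaults in _parse_args().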
self._nova = nc.Client(
'2', username='admin',
project_id=self._args.proj_name, api_key='contrail123',
auth_url='http://' + self._args.ifmap_server_ip + ':5000/v2.0')
# end _novaclient_init
# create service instance
def create_si(self):
# get service template
try:
st_obj = self._vnc_lib.service_template_read(
fq_name=self._st_fq_name)
st_prop = st_obj.get_service_template_properties()
if st_prop is None:
print "Error: Service template %s properties not found"\
% (self._args.template_name)
return
except NoIdError:
print "Error: Service template %s not found"\
% (self._args.template_name)
return
if st_prop.get_image_name():
# check if image exists
try:
self._nova.images.find(name=st_prop.get_image_name())
except nc_exc.NotFound:
print "Error: Image %s not found" % (st_prop.get_image_name())
return
# check if passed VNs exist
if self._args.left_vn:
try:
self._vnc_lib.virtual_network_read(
fq_name=self._left_vn_fq_name)
except NoIdError:
print "Error: Left VN %s not found" % (self._left_vn_fq_name)
return
if self._args.right_vn:
try:
self._vnc_lib.virtual_network_read(
fq_name=self._right_vn_fq_name)
except NoIdError:
print "Error: Right VN %s not found" % (self._right_vn_fq_name)
return
if self._args.mgmt_vn:
try:
self._vnc_lib.virtual_network_read(
fq_name=self._mgmt_vn_fq_name)
except NoIdError:
print "Error: Management VN %s not found" % (self._mgmt_vn_fq_name)
return
else:
self._mgmt_vn_fq_name = []
# create si
print "Creating service instance %s" % (self._args.instance_name)
project = self._vnc_lib.project_read(fq_name=self._proj_fq_name)
try:
si_obj = self._vnc_lib.service_instance_read(
fq_name=self._si_fq_name)
si_uuid = si_obj.uuid
except NoIdError:
si_obj = ServiceInstance(
self._args.instance_name, parent_obj=project)
si_uuid = self._vnc_lib.service_instance_create(si_obj)
si_prop = ServiceInstanceType(
left_virtual_network=':'.join(self._left_vn_fq_name),
management_virtual_network=':'.join(self._mgmt_vn_fq_name),
right_virtual_network=':'.join(self._right_vn_fq_name))
# set scale out
scale_out = ServiceScaleOutType(
max_instances=self._args.max_instances,
auto_scale=self._args.auto_scale)
si_prop.set_scale_out(scale_out)
si_obj.set_service_instance_properties(si_prop)
st_obj = self._vnc_lib.service_template_read(id=st_obj.uuid)
si_obj.set_service_template(st_obj)
self._vnc_lib.service_instance_update(si_obj)
return si_uuid
# end create_si
def delete_si(self):
try:
print "Deleting service instance %s" % (self._args.instance_name)
self._vnc_lib.service_instance_delete(fq_name=self._si_fq_name)
except NoIdError:
return
    # end delete_si
def list_si(self):
print "List service instances"
instances = self._vnc_lib.service_instances_list()
pprint.pprint(instances)
    # end list_si
# end class ServiceInstanceCmd
def main(args_str=None):
si = ServiceInstanceCmd(args_str)
si._args.func()
# end main
if __name__ == "__main__":
main()
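# Illustrative invocations (script and object names are hypothetical; assumes
# the API server is reachable at the configured api_server_ip:api_server_port):
#   python service_instance_cmd.py add my-si nat-template --left_vn left-net \
#       --right_vn right-net --max_instances 2
#   python service_instance_cmd.py del my-si nat-template
#   python service_instance_cmd.py list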
| apache-2.0 |
sguotciq/django-test | mysite/mysite/urls.py | 1 | 1262 | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
urlpatterns = [
# Examples:
url(r'^$', 'learn.views.index', name='default'),# homepage
url(r'^home/$', 'learn.views.home', name='home'),# homepage
url(r'^add/$', 'learn.views.add', name='add'),#add test
url(r'^add/(\d+)/(\d+)/$', 'learn.views.add2', name='add2'),#add test2
url(r'^admin/', include(admin.site.urls)),
]
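# A minimal sketch (an assumption -- learn/views.py is not part of this
# snapshot) of the 'add2' view wired to the capturing URL above; the regex
# groups arrive as strings, so they are cast to int before adding:
#   from django.http import HttpResponse
#   def add2(request, a, b):
#       return HttpResponse(str(int(a) + int(b)))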
# urlpatterns = patterns('',
# # Examples:
# url(r'^$', 'learn.views.index', name='home'),# Notice this line
# # url(r'^blog/', include('blog.urls')),
# url(r'^admin/', include(admin.site.urls)),
# ) | gpl-2.0 |
jamespacileo/django-france | tests/regressiontests/forms/tests/formsets.py | 50 | 41032 | # -*- coding: utf-8 -*-
from django.forms import Form, CharField, IntegerField, ValidationError, DateField
from django.forms.formsets import formset_factory, BaseFormSet
from django.utils.unittest import TestCase
class Choice(Form):
choice = CharField()
votes = IntegerField()
# FormSet allows us to use multiple instances of the same form on one page. For
# now, the best way to create a FormSet is by using the formset_factory function.
ChoiceFormSet = formset_factory(Choice)
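# A minimal sketch (not part of the original suite) of the round trip the
# tests below exercise: the hidden management-form fields (TOTAL_FORMS etc.)
# must accompany the per-form data for validation to run. Uses the default
# "form" prefix that formset_factory-produced formsets assume.
def _demo_basic_formset_usage():
    data = {
        'form-TOTAL_FORMS': '1',
        'form-INITIAL_FORMS': '0',
        'form-MAX_NUM_FORMS': '0',
        'form-0-choice': 'Calexico',
        'form-0-votes': '100',
    }
    formset = ChoiceFormSet(data)
    assert formset.is_valid()
    assert [f.cleaned_data for f in formset.forms] == [{'votes': 100, 'choice': u'Calexico'}]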
class FavoriteDrinkForm(Form):
name = CharField()
class BaseFavoriteDrinksFormSet(BaseFormSet):
def clean(self):
seen_drinks = []
for drink in self.cleaned_data:
if drink['name'] in seen_drinks:
raise ValidationError('You may only specify a drink once.')
seen_drinks.append(drink['name'])
class EmptyFsetWontValidate(BaseFormSet):
def clean(self):
raise ValidationError("Clean method called")
# Let's define a FormSet that takes a list of favorite drinks, but raises an
# error if there are any duplicates. Used in ``test_clean_hook``,
# ``test_regression_6926`` & ``test_regression_12878``.
FavoriteDrinksFormSet = formset_factory(FavoriteDrinkForm,
formset=BaseFavoriteDrinksFormSet, extra=3)
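# A quick sketch (again not part of the original suite) of how errors raised
# in BaseFormSet.clean() surface: they land in non_form_errors() rather than
# in any individual form's errors attribute.
def _demo_clean_hook():
    data = {
        'drinks-TOTAL_FORMS': '2',
        'drinks-INITIAL_FORMS': '0',
        'drinks-MAX_NUM_FORMS': '0',
        'drinks-0-name': 'Negroni',
        'drinks-1-name': 'Negroni',
    }
    formset = FavoriteDrinksFormSet(data, prefix='drinks')
    assert not formset.is_valid()
    assert formset.non_form_errors() == [u'You may only specify a drink once.']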
class FormsFormsetTestCase(TestCase):
def test_basic_formset(self):
# A FormSet constructor takes the same arguments as Form. Let's create a FormSet
# for adding data. By default, it displays 1 blank form. It can display more,
# but we'll look at how to do so later.
formset = ChoiceFormSet(auto_id=False, prefix='choices')
self.assertEqual(str(formset), """<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" /></td></tr>
<tr><th>Votes:</th><td><input type="text" name="choices-0-votes" /></td></tr>""")
        # One thing to note is that there needs to be a special value in the data.
        # This value tells the FormSet how many forms were displayed so it can tell
        # how many forms it needs to clean and validate. You could use javascript
        # to create new forms on the client side, but they won't get validated
        # unless you increment the TOTAL_FORMS field appropriately.
data = {
'choices-TOTAL_FORMS': '1', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
# We treat FormSet pretty much like we would treat a normal Form. FormSet has an
# is_valid method, and a cleaned_data or errors attribute depending on whether all
# the forms passed validation. However, unlike a Form instance, cleaned_data and
# errors will be a list of dicts rather than just a single dict.
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': u'Calexico'}])
# If a FormSet was not passed any data, its is_valid method should return False.
formset = ChoiceFormSet()
self.assertFalse(formset.is_valid())
def test_formset_validation(self):
# FormSet instances can also have an error attribute if validation failed for
# any of the forms.
data = {
'choices-TOTAL_FORMS': '1', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'votes': [u'This field is required.']}])
def test_formset_initial_data(self):
# We can also prefill a FormSet with existing data by providing an ``initial``
# argument to the constructor. ``initial`` should be a list of dicts. By default,
# an extra blank form is included.
initial = [{'choice': u'Calexico', 'votes': 100}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="text" name="choices-1-votes" /></li>""")
# Let's simulate what would happen if we submitted this form.
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '1', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': u'Calexico'}, {}])
def test_second_form_partially_filled(self):
        # But the second form was blank! Shouldn't we get some errors? No. If we
        # display a form as blank, it's ok for it to be submitted as blank. If we
        # fill out even one of the fields of a blank form though, it will be
        # validated. We may want to require that at least X forms are completed,
        # but we'll show how to handle that later.
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '1', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': 'The Decemberists',
'choices-1-votes': '', # missing value
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': [u'This field is required.']}])
def test_delete_prefilled_data(self):
# If we delete data that was pre-filled, we should get an error. Simply removing
# data from form fields isn't the proper way to delete it. We'll see how to
# handle that case later.
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '1', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': '', # deleted value
'choices-0-votes': '', # deleted value
'choices-1-choice': '',
'choices-1-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'votes': [u'This field is required.'], 'choice': [u'This field is required.']}, {}])
def test_displaying_more_than_one_blank_form(self):
# Displaying more than 1 blank form ###########################################
        # We can also display more than 1 empty form at a time. To do so, pass an
        # extra argument to formset_factory.
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="text" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="text" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="text" name="choices-2-votes" /></li>""")
# Since we displayed every form as blank, we will also accept them back as blank.
# This may seem a little strange, but later we will show how to require a minimum
# number of forms to be completed.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': '',
'choices-0-votes': '',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{}, {}, {}])
def test_single_form_completed(self):
# We can just fill out one of the forms.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': u'Calexico'}, {}, {}])
def test_second_form_partially_filled_2(self):
# And once again, if we try to partially complete a form, validation will fail.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': 'The Decemberists',
'choices-1-votes': '', # missing value
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': [u'This field is required.']}, {}])
def test_more_initial_data(self):
# The extra argument also works when the formset is pre-filled with initial
# data.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '', # missing value
'choices-2-choice': '',
'choices-2-votes': '',
}
initial = [{'choice': u'Calexico', 'votes': 100}]
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="text" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="text" name="choices-2-votes" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="text" name="choices-3-votes" /></li>""")
# Make sure retrieving an empty form works, and it shows up in the form list
self.assertTrue(formset.empty_form.empty_permitted)
self.assertEqual(formset.empty_form.as_ul(), """<li>Choice: <input type="text" name="choices-__prefix__-choice" /></li>
<li>Votes: <input type="text" name="choices-__prefix__-votes" /></li>""")
def test_formset_with_deletion(self):
# FormSets with deletion ######################################################
# We can easily add deletion ability to a FormSet with an argument to
# formset_factory. This will add a boolean field to each form instance. When
# that boolean field is True, the form will be in formset.deleted_forms
ChoiceFormSet = formset_factory(Choice, can_delete=True)
initial = [{'choice': u'Calexico', 'votes': 100}, {'choice': u'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="text" name="choices-1-votes" value="900" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="text" name="choices-2-votes" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>""")
# To delete something, we just need to set that form's special delete field to
# 'on'. Let's go ahead and delete Fergie.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-DELETE': 'on',
'choices-2-choice': '',
'choices-2-votes': '',
'choices-2-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'DELETE': False, 'choice': u'Calexico'}, {'votes': 900, 'DELETE': True, 'choice': u'Fergie'}, {}])
self.assertEqual([form.cleaned_data for form in formset.deleted_forms], [{'votes': 900, 'DELETE': True, 'choice': u'Fergie'}])
# If we fill a form with something and then we check the can_delete checkbox for
# that form, that form's errors should not make the entire formset invalid since
# it's going to be deleted.
class CheckForm(Form):
field = IntegerField(min_value=100)
data = {
'check-TOTAL_FORMS': '3', # the number of forms rendered
'check-INITIAL_FORMS': '2', # the number of forms with initial data
'check-MAX_NUM_FORMS': '0', # max number of forms
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
'check-2-field': '',
'check-2-DELETE': '',
}
CheckFormSet = formset_factory(CheckForm, can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
# If we remove the deletion flag now we will have our validation back.
data['check-1-DELETE'] = ''
formset = CheckFormSet(data, prefix='check')
self.assertFalse(formset.is_valid())
# Should be able to get deleted_forms from a valid formset even if a
# deleted form would have been invalid.
class Person(Form):
name = CharField()
PeopleForm = formset_factory(
form=Person,
can_delete=True)
p = PeopleForm(
{'form-0-name': u'', 'form-0-DELETE': u'on', # no name!
'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1})
self.assertTrue(p.is_valid())
self.assertEqual(len(p.deleted_forms), 1)
def test_formsets_with_ordering(self):
# FormSets with ordering ######################################################
        # We can also add ordering ability to a FormSet with an argument to
        # formset_factory. This will add an integer field to each form instance.
        # When form validation succeeds, [form.cleaned_data for form in
        # formset.forms] will have the data in the correct order specified by the
        # ordering fields. If a number is duplicated in the set of ordering
        # fields, for instance if form 0 and form 3 are both marked as 1, then the
        # form index is used as a secondary ordering criterion. In order to put
        # something at the front of the list, you'd need to set its order to 0.
ChoiceFormSet = formset_factory(Choice, can_order=True)
initial = [{'choice': u'Calexico', 'votes': 100}, {'choice': u'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="text" name="choices-0-ORDER" value="1" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="text" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="text" name="choices-1-ORDER" value="2" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="text" name="choices-2-votes" /></li>
<li>Order: <input type="text" name="choices-2-ORDER" /></li>""")
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'ORDER': 0, 'choice': u'The Decemberists'},
{'votes': 100, 'ORDER': 1, 'choice': u'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': u'Fergie'},
])
def test_empty_ordered_fields(self):
# Ordering fields are allowed to be left blank, and if they *are* left blank,
# they will be sorted below everything else.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '',
'choices-3-choice': 'Basia Bulat',
'choices-3-votes': '50',
'choices-3-ORDER': '',
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 100, 'ORDER': 1, 'choice': u'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': u'Fergie'},
{'votes': 500, 'ORDER': None, 'choice': u'The Decemberists'},
{'votes': 50, 'ORDER': None, 'choice': u'Basia Bulat'},
])
def test_ordering_blank_fieldsets(self):
# Ordering should work with blank fieldsets.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [])
def test_formset_with_ordering_and_deletion(self):
# FormSets with ordering + deletion ###########################################
# Let's try throwing ordering and deletion into the same form.
ChoiceFormSet = formset_factory(Choice, can_order=True, can_delete=True)
initial = [
{'choice': u'Calexico', 'votes': 100},
{'choice': u'Fergie', 'votes': 900},
{'choice': u'The Decemberists', 'votes': 500},
]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="text" name="choices-0-ORDER" value="1" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="text" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="text" name="choices-1-ORDER" value="2" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" value="The Decemberists" /></li>
<li>Votes: <input type="text" name="choices-2-votes" value="500" /></li>
<li>Order: <input type="text" name="choices-2-ORDER" value="3" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="text" name="choices-3-votes" /></li>
<li>Order: <input type="text" name="choices-3-ORDER" /></li>
<li>Delete: <input type="checkbox" name="choices-3-DELETE" /></li>""")
# Let's delete Fergie, and put The Decemberists ahead of Calexico.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-1-DELETE': 'on',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
'choices-2-DELETE': '',
'choices-3-choice': '',
'choices-3-votes': '',
'choices-3-ORDER': '',
'choices-3-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'DELETE': False, 'ORDER': 0, 'choice': u'The Decemberists'},
{'votes': 100, 'DELETE': False, 'ORDER': 1, 'choice': u'Calexico'},
])
self.assertEqual([form.cleaned_data for form in formset.deleted_forms], [{'votes': 900, 'DELETE': True, 'ORDER': 2, 'choice': u'Fergie'}])
def test_invalid_deleted_form_with_ordering(self):
# Should be able to get ordered forms from a valid formset even if a
# deleted form would have been invalid.
class Person(Form):
name = CharField()
PeopleForm = formset_factory(form=Person, can_delete=True, can_order=True)
p = PeopleForm({
'form-0-name': u'',
'form-0-DELETE': u'on', # no name!
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1
})
self.assertTrue(p.is_valid())
self.assertEqual(p.ordered_forms, [])
def test_clean_hook(self):
# FormSet clean hook ##########################################################
# FormSets have a hook for doing extra validation that shouldn't be tied to any
# particular form. It follows the same pattern as the clean hook on Forms.
        # We start out with some duplicate data.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
# Any errors raised by formset.clean() are available via the
# formset.non_form_errors() method.
for error in formset.non_form_errors():
self.assertEqual(str(error), 'You may only specify a drink once.')
# Make sure we didn't break the valid case.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Bloody Mary',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertTrue(formset.is_valid())
self.assertEqual(formset.non_form_errors(), [])
def test_limiting_max_forms(self):
# Limiting the maximum number of forms ########################################
# Base case for max_num.
# When not passed, max_num will take its default value of None, i.e. unlimited
# number of forms, only controlled by the value of the extra parameter.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th><td><input type="text" name="form-2-name" id="id_form-2-name" /></td></tr>""")
# If max_num is 0 then no form is rendered at all.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=5, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
# Ensure that max_num has no effect when extra is less than max_num.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>""")
def test_max_num_with_initial_data(self):
# max_num with initial data
# When not passed, max_num will take its default value of None, i.e. unlimited
# number of forms, only controlled by the values of the initial and extra
# parameters.
initial = [
{'name': 'Fernet and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" value="Fernet and Coke" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
def test_max_num_zero(self):
# If max_num is 0 then no form is rendered at all, even if extra and initial
# are specified.
initial = [
{'name': 'Fernet and Coke'},
{'name': 'Bloody Mary'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
def test_more_initial_than_max_num(self):
# More initial forms than max_num will result in only the first max_num of
# them to be displayed with no extra forms.
initial = [
{'name': 'Gin Tonic'},
{'name': 'Bloody Mary'},
{'name': 'Jack and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" value="Bloody Mary" id="id_form-1-name" /></td></tr>""")
# One form from initial and extra=3 with max_num=2 should result in the one
# initial form and one extra.
initial = [
{'name': 'Gin Tonic'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
def test_regression_6926(self):
# Regression test for #6926 ##################################################
# Make sure the management form has the correct prefix.
formset = FavoriteDrinksFormSet()
self.assertEqual(formset.management_form.prefix, 'form')
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
formset = FavoriteDrinksFormSet(data=data)
self.assertEqual(formset.management_form.prefix, 'form')
formset = FavoriteDrinksFormSet(initial={})
self.assertEqual(formset.management_form.prefix, 'form')
def test_regression_12878(self):
# Regression test for #12878 #################################################
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), [u'You may only specify a drink once.'])
def test_formset_iteration(self):
# Regression tests for #16455 -- formset instances are iterable
ChoiceFormset = formset_factory(Choice, extra=3)
formset = ChoiceFormset()
# confirm iterated formset yields formset.forms
forms = list(formset)
self.assertEqual(forms, formset.forms)
self.assertEqual(len(formset), len(forms))
# confirm indexing of formset
self.assertEqual(formset[0], forms[0])
try:
formset[3]
self.fail('Requesting an invalid formset index should raise an exception')
except IndexError:
pass
        # Formsets can override the default iteration order
class BaseReverseFormSet(BaseFormSet):
def __iter__(self):
for form in reversed(self.forms):
yield form
ReverseChoiceFormset = formset_factory(Choice, BaseReverseFormSet, extra=3)
reverse_formset = ReverseChoiceFormset()
# confirm that __iter__ modifies rendering order
# compare forms from "reverse" formset with forms from original formset
self.assertEqual(str(reverse_formset[0]), str(forms[-1]))
self.assertEqual(str(reverse_formset[1]), str(forms[-2]))
self.assertEqual(len(reverse_formset), len(forms))
data = {
'choices-TOTAL_FORMS': '1', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
class Choice(Form):
choice = CharField()
votes = IntegerField()
ChoiceFormSet = formset_factory(Choice)
class FormsetAsFooTests(TestCase):
def test_as_table(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.as_table(),"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" value="Calexico" /></td></tr>
<tr><th>Votes:</th><td><input type="text" name="choices-0-votes" value="100" /></td></tr>""")
def test_as_p(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.as_p(),"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<p>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></p>
<p>Votes: <input type="text" name="choices-0-votes" value="100" /></p>""")
def test_as_ul(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.as_ul(),"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>""")
# Regression test for #11418 #################################################
class ArticleForm(Form):
title = CharField()
pub_date = DateField()
ArticleFormSet = formset_factory(ArticleForm)
class TestIsBoundBehavior(TestCase):
def test_no_data_raises_validation_error(self):
self.assertRaises(ValidationError, ArticleFormSet, {})
def test_with_management_data_attrs_work_fine(self):
data = {
'form-TOTAL_FORMS': u'1',
'form-INITIAL_FORMS': u'0',
}
formset = ArticleFormSet(data)
self.assertEqual(0, formset.initial_form_count())
self.assertEqual(1, formset.total_form_count())
self.assertTrue(formset.is_bound)
self.assertTrue(formset.forms[0].is_bound)
self.assertTrue(formset.is_valid())
self.assertTrue(formset.forms[0].is_valid())
self.assertEqual([{}], formset.cleaned_data)
    def test_form_errors_are_caught_by_formset(self):
data = {
'form-TOTAL_FORMS': u'2',
'form-INITIAL_FORMS': u'0',
'form-0-title': u'Test',
'form-0-pub_date': u'1904-06-16',
'form-1-title': u'Test',
'form-1-pub_date': u'', # <-- this date is missing but required
}
formset = ArticleFormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual([{}, {'pub_date': [u'This field is required.']}], formset.errors)
def test_empty_forms_are_unbound(self):
data = {
'form-TOTAL_FORMS': u'1',
'form-INITIAL_FORMS': u'0',
'form-0-title': u'Test',
'form-0-pub_date': u'1904-06-16',
}
unbound_formset = ArticleFormSet()
bound_formset = ArticleFormSet(data)
empty_forms = []
empty_forms.append(unbound_formset.empty_form)
empty_forms.append(bound_formset.empty_form)
# Empty forms should be unbound
self.assertFalse(empty_forms[0].is_bound)
self.assertFalse(empty_forms[1].is_bound)
# The empty forms should be equal.
self.assertEqual(empty_forms[0].as_p(), empty_forms[1].as_p())
class TestEmptyFormSet(TestCase):
"Test that an empty formset still calls clean()"
def test_empty_formset_is_valid(self):
        EmptyFsetWontValidateFormset = formset_factory(FavoriteDrinkForm, extra=0, formset=EmptyFsetWontValidate)
        formset = EmptyFsetWontValidateFormset(data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '0'}, prefix="form")
        formset2 = EmptyFsetWontValidateFormset(data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '1', 'form-0-name': 'bah'}, prefix="form")
self.assertFalse(formset.is_valid())
self.assertFalse(formset2.is_valid())
| bsd-3-clause |
SDRC-AUV/ardusub | Tools/scripts/generate-manifest.py | 22 | 11584 | #!/usr/bin/python
from __future__ import print_function
import sys
import json
import os
import re
class Firmware():
def __init__(self, date=None, platform=None, vehicletype=None, filepath=None, git_sha=None, frame=None):
self.atts = dict()
self.atts["date"] = date
self.atts["platform"] = platform
self.atts["vehicletype"] = vehicletype
self.atts["filepath"] = filepath
self.atts["git_sha"] = git_sha
self.atts["frame"] = frame
self.atts["release-type"] = None
self.atts["firmware-version"] = None
def __getitem__(self,what):
return self.atts[what]
def __setitem__(self,name,value):
self.atts[name] = value
class ManifestGenerator():
'''Return a JSON string describing "binary" directory contents under basedir'''
def __init__(self,basedir,baseurl):
self.basedir = basedir
self.baseurl = baseurl
def frame_map(self, frame):
'''translate from ArduPilot frame type terminology into mavlink terminology'''
frame_to_mavlink_dict = {
"quad": "QUADROTOR",
"hexa": "HEXAROTOR",
"y6": "ARDUPILOT_Y6",
"tri": "TRICOPTER",
"octa": "OCTOROTOR",
"octa-quad": "ARDUPILOT_OCTAQUAD",
"heli": "HELICOPTER",
"Plane": "FIXED_WING",
"AntennaTracker": "ANTENNA_TRACKER",
"Rover": "GROUND_ROVER",
"PX4IO": "ARDUPILOT_PX4IO",
}
if frame in frame_to_mavlink_dict:
return frame_to_mavlink_dict[frame]
return frame
def releasetype_map(self, releasetype):
'''translate from ArduPilot release type terminology into mavlink terminology'''
        if releasetype == 'stable':
            return 'OFFICIAL'
return releasetype.upper()
def looks_like_binaries_directory(self, dir):
'''returns True if dir looks like it is a build_binaries.sh output directory'''
for entry in os.listdir(dir):
if entry in {"AntennaTracker", "Copter", "Plane", "Rover"}:
return True
return False
def git_sha_from_git_version(self, filepath):
'''parses get-version.txt (as emitted by build_binaries.sh, returns git sha from it'''
content = open(filepath).read()
sha_regex = re.compile("commit (?P<sha>[0-9a-f]+)")
m = sha_regex.search(content)
if m is None:
raise Exception("filepath (%s) does not appear to contain a git sha" % (filepath,))
return m.group("sha")
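    # Illustrative git-version.txt shape this parser expects (emitted by
    # build_binaries.sh; anything beyond the "commit <sha>" line is ignored):
    #   commit 1c2d3e4f5a6b7c8d9e0f1a2b3c4d5e6f7a8b9c0d
    #   ...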
def add_firmware_data_from_dir(self, dir, firmware_data, vehicletype, releasetype="dev"):
'''accumulate additional information about firmwares from a directory'''
platform_frame_regex = re.compile("(?P<board>PX4|navio|pxf)(-(?P<frame>.+))?")
variant_firmware_regex = re.compile("[^-]+-(?P<variant>v\d+)[.px4]")
for platformdir in os.listdir(dir):
some_dir = os.path.join(dir, platformdir)
try:
git_sha = self.git_sha_from_git_version(os.path.join(some_dir, "git-version.txt"))
except Exception as e:
continue
try:
firmware_version = open(os.path.join(some_dir, "firmware-version.txt")).read()
firmware_version = firmware_version.strip()
except Exception as e:
# this exception is swallowed.... the current archive
# is incomplete.
firmware_version = None
m = platform_frame_regex.match(platformdir)
if m is not None:
# the model type (quad/tri) is
# encoded in the platform name
# (e.g. navio-octa)
platform = m.group("board") # e.g. navio
frame = m.group("frame") # e.g. octa
if frame is None:
frame = vehicletype
else:
frame = vehicletype # e.g. Plane
platform = platformdir # e.g. apm2
for file in os.listdir(some_dir):
if file == "git-version.txt":
continue
if file == "firmware-version.txt":
continue
m = variant_firmware_regex.match(file)
if m:
# the platform variant is
# encoded in the firmware filename
# (e.g. the "v1" in
# ArduCopter-v1.px4)
variant = m.group("variant")
file_platform = "-".join([platform,variant])
else:
file_platform = platform
firmware_format = "".join(file.split(".")[-1:])
if not vehicletype in firmware_data:
firmware_data[vehicletype] = dict()
if not file_platform in firmware_data[vehicletype]:
firmware_data[vehicletype][file_platform] = dict()
if not git_sha in firmware_data[vehicletype][file_platform]:
firmware_data[vehicletype][file_platform][git_sha] = dict()
if not firmware_format in firmware_data[vehicletype][file_platform][git_sha]:
firmware_data[vehicletype][file_platform][git_sha][firmware_format] = dict()
if not frame in firmware_data[vehicletype][file_platform][git_sha][firmware_format]:
firmware_data[vehicletype][file_platform][git_sha][firmware_format][frame] = Firmware()
firmware = firmware_data[vehicletype][file_platform][git_sha][firmware_format][frame]
                # translate from the supplied "release type" into both a
                # "latest" flag and an actual release type. Also sort
                # out which filepath we should use:
                firmware["latest"] = 0
if releasetype == "dev":
if firmware["filepath"] is None:
firmware["filepath"] = os.path.join(some_dir, file)
if firmware["release-type"] is None:
firmware["release-type"] = "dev"
elif releasetype == "latest":
firmware["latest"] = 1
firmware["filepath"] = os.path.join(some_dir, file)
if firmware["release-type"] is None:
firmware["release-type"] = "dev"
else:
if (not firmware["latest"]):
firmware["filepath"] = os.path.join(some_dir, file)
firmware["release-type"] = releasetype
firmware["platform"] = file_platform
firmware["vehicletype"] = vehicletype
firmware["git_sha"] = git_sha
firmware["frame"] = frame
firmware["timestamp"] = os.path.getctime(firmware["filepath"])
firmware["format"] = firmware_format
firmware["firmware-version"] = firmware_version
def xfirmwares_to_firmwares(self, xfirmwares):
'''takes hash structure of firmwares, returns list of them'''
if isinstance(xfirmwares, dict):
ret = []
for value in xfirmwares.values():
o = self.xfirmwares_to_firmwares(value)
for oo in o:
ret.append(oo)
return ret
else:
return [xfirmwares]
known_release_types = {
"beta" : 1,
"latest" : 1,
"stable" : 1
}
def parse_fw_version(self, version):
(version_numbers,release_type) = version.split("-")
(major,minor,patch) = version_numbers.split(".")
return (major,minor,patch,version)
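    # e.g. (illustrative): parse_fw_version("3.4.6-beta")
    #   -> ("3", "4", "6", "3.4.6-beta")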
def walk_directory(self, basedir):
'''walks directory structure created by build_binaries, returns Python structure representing releases in that structure'''
year_month_regex = re.compile("(?P<year>\d{4})-(?P<month>\d{2})")
xfirmwares = dict()
# used to listdir basedir here, but since this is also a web document root, there's a lot of other stuff accumulated...
vehicletypes = [ 'AntennaTracker', 'Copter', 'Plane', 'PX4IO', 'Rover' ]
for vehicletype in vehicletypes:
vdir = os.listdir(os.path.join(basedir, vehicletype))
for firstlevel in vdir:
if year_month_regex.match(firstlevel):
# this is a dated directory e.g. binaries/Copter/2016-02
year_month_path = os.path.join(basedir, vehicletype, firstlevel)
for fulldate in os.listdir(year_month_path):
self.add_firmware_data_from_dir(os.path.join(year_month_path, fulldate), xfirmwares, vehicletype)
else:
# assume this is a release directory such as
# "beta", or the "latest" directory (treated as a
# release and handled specially later)
tag = firstlevel
if tag not in self.known_release_types:
print("Unknown tag (%s) in directory (%s)" %
(tag, vdir))
tag_path = os.path.join(basedir, vehicletype, tag)
self.add_firmware_data_from_dir(tag_path, xfirmwares, vehicletype, releasetype=tag)
firmwares = self.xfirmwares_to_firmwares(xfirmwares)
# convert from ardupilot-naming conventions to common JSON format:
firmware_json = []
for firmware in firmwares:
filepath = firmware["filepath"]
# replace the base directory with the base URL
urlifier = re.compile("^" + re.escape(basedir))
url = re.sub(urlifier, self.baseurl, filepath)
some_json = dict({
"mav-autopilot": "ARDUPILOTMEGA",
# "vehicletype": firmware["vehicletype"],
"platform": firmware["platform"],
"git-sha": firmware["git_sha"],
"url": url,
"mav-type": self.frame_map(firmware["frame"]),
"mav-firmware-version-type": self.releasetype_map(firmware["release-type"]),
"latest": firmware["latest"],
"format": firmware["format"],
})
if firmware["firmware-version"]:
(major,minor,patch,release_type) = self.parse_fw_version(firmware["firmware-version"])
some_json["mav-firmware-version"] = ".".join([major,minor,patch])
some_json["mav-firmware-version-major"] = major
some_json["mav-firmware-version-minor"] = minor
some_json["mav-firmware-version-patch"] = patch
firmware_json.append(some_json)
ret = {
"format-version": "1.0.0", # semantic versioning
"firmware": firmware_json
}
return ret
def json(self):
'''walk directory supplied in constructor, return json string'''
if not self.looks_like_binaries_directory(self.basedir):
print("Warning: this does not look like a binaries directory", file=sys.stderr)
structure = self.walk_directory(self.basedir)
return json.dumps(structure, indent=4)
def usage():
return '''Usage:
generate-manifest.py basedir baseurl'''
if __name__ == "__main__":
if len(sys.argv) != 3:
print(usage())
sys.exit(1)
generator = ManifestGenerator(sys.argv[1], sys.argv[2])
print(generator.json())
| gpl-3.0 |
pyload/pyload | src/pyload/webui/app/blueprints/cnl_blueprint.py | 1 | 5834 | # -*- coding: utf-8 -*-
import os
from base64 import standard_b64decode
from functools import wraps
from urllib.parse import unquote
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
import flask
from flask.json import jsonify
from pyload.core.api import Destination
from pyload.core.utils.convert import to_str
from pyload.core.utils.misc import eval_js
#: url_prefix here is intentional since it should not be affected by the path prefix
bp = flask.Blueprint("flash", __name__, url_prefix="/")
#: decorator
def local_check(func):
@wraps(func)
def wrapper(*args, **kwargs):
remote_addr = flask.request.environ.get("REMOTE_ADDR", "0")
http_host = flask.request.environ.get("HTTP_HOST", "0")
if remote_addr in ("127.0.0.1", "::ffff:127.0.0.1", "::1", "localhost") or http_host in (
"127.0.0.1:9666",
"[::1]:9666",
):
return func(*args, **kwargs)
else:
return "Forbidden", 403
return wrapper
@bp.after_request
def add_cors(response):
response.headers.update({
'Access-Control-Max-Age': 1800,
'Access-Control-Allow-Origin': "*",
'Access-Control-Allow-Methods': "OPTIONS, GET, POST"
})
return response
@bp.route("/flash/", methods=["GET", "POST"], endpoint="index")
@bp.route("/flash/<id>", methods=["GET", "POST"], endpoint="index")
@local_check
def index(id="0"):
return "JDownloader\r\n"
@bp.route("/flash/add", methods=["POST"], endpoint="add")
@local_check
def add():
package = flask.request.form.get(
"package", flask.request.form.get("source", flask.request.form.get("referer"))
)
urls = [url for url in flask.request.form["urls"].replace(' ', '\n').split("\n") if url.strip()]
if not urls:
return jsonify(False)
api = flask.current_app.config["PYLOAD_API"]
try:
if package:
api.add_package(package, urls, Destination.COLLECTOR)
else:
api.generate_and_add_packages(urls, Destination.COLLECTOR)
except Exception as e:
return "failed " + e.args[0] + "\r\n"
return "success\r\n"
@bp.route("/flash/addcrypted", methods=["POST"], endpoint="addcrypted")
@local_check
def addcrypted():
api = flask.current_app.config["PYLOAD_API"]
package = flask.request.form.get(
"package", flask.request.form.get("source", flask.request.form.get("referer"))
)
dl_path = api.get_config_value("general", "storage_folder")
dlc_path = os.path.join(
dl_path, package.replace("/", "").replace("\\", "").replace(":", "") + ".dlc"
)
dlc = flask.request.form["crypted"].replace(" ", "+")
    # the DLC payload arrives as base64 text; encode it to bytes since the
    # container file is opened in binary mode
    with open(dlc_path, mode="wb") as fp:
        fp.write(dlc.encode())
try:
api.add_package(package, [dlc_path], Destination.COLLECTOR)
except Exception:
flask.abort(500)
else:
return "success\r\n"
@bp.route("/flash/addcrypted2", methods=["POST"], endpoint="addcrypted2")
@local_check
def addcrypted2():
package = flask.request.form.get(
"package", flask.request.form.get("source", flask.request.form.get("referer"))
)
crypted = flask.request.form["crypted"]
jk = flask.request.form["jk"]
crypted = standard_b64decode(unquote(crypted.replace(" ", "+")))
jk = eval_js(f"{jk} f()")
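    # the 'jk' field carries a tiny JavaScript function whose return value is
    # the hex-encoded AES key; the same bytes are reused as the CBC IV below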
try:
IV = key = bytes.fromhex(jk)
except Exception:
return "Could not decrypt key", 500
cipher = Cipher(
algorithms.AES(key), modes.CBC(IV), backend=default_backend()
)
decryptor = cipher.decryptor()
decrypted = decryptor.update(crypted) + decryptor.finalize()
urls = to_str(decrypted).replace("\x00", "").replace("\r", "").split("\n")
urls = [url for url in urls if url.strip()]
api = flask.current_app.config["PYLOAD_API"]
try:
if package:
api.add_package(package, urls, Destination.COLLECTOR)
else:
api.generate_and_add_packages(urls, Destination.COLLECTOR)
except Exception:
return "failed can't add", 500
else:
return "success\r\n"
@bp.route("/flashgot", methods=["POST"], endpoint="flashgot")
@bp.route("/flashgot_pyload", methods=["POST"], endpoint="flashgot")
@local_check
def flashgot():
if flask.request.referrer not in (
"http://localhost:9666/flashgot",
"http://127.0.0.1:9666/flashgot",
):
flask.abort(500)
package = flask.request.form.get("package")
urls = [url for url in flask.request.form["urls"].split("\n") if url.strip()]
# folder = flask.request.form.get('dir', None)
autostart = int(flask.request.form.get("autostart", 0))
api = flask.current_app.config["PYLOAD_API"]
    if package:
        api.add_package(package, urls, Destination.QUEUE if autostart else Destination.COLLECTOR)
    else:
        api.generate_and_add_packages(urls, Destination.QUEUE if autostart else Destination.COLLECTOR)
    # Flask views must return a response; an empty body is enough here
    return ""
@bp.route("/crossdomain.xml", endpoint="crossdomain")
@local_check
def crossdomain():
rep = '<?xml version="1.0"?>\n'
rep += '<!DOCTYPE cross-domain-policy SYSTEM "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">\n'
rep += "<cross-domain-policy>\n"
rep += '<allow-access-from domain="*" />\n'
rep += "</cross-domain-policy>"
return rep
@bp.route("/flash/checkSupportForUrl", methods=["POST"], endpoint="checksupport")
@local_check
def checksupport():
api = flask.current_app.config["PYLOAD_API"]
url = flask.request.form["url"]
res = api.check_urls([url])
supported = not res[0][1] is None
return str(supported).lower()
@bp.route("/jdcheck.js", endpoint="jdcheck")
@local_check
def jdcheck():
rep = "jdownloader=true;\r\n"
rep += "var version='42707';\r\n"
return rep
| agpl-3.0 |
bckwltn/SickRage | lib/requests/packages/chardet/charsetgroupprober.py | 2929 | 3791 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mActiveNum = 0
self._mProbers = []
self._mBestGuessProber = None
def reset(self):
CharSetProber.reset(self)
self._mActiveNum = 0
for prober in self._mProbers:
if prober:
prober.reset()
prober.active = True
self._mActiveNum += 1
self._mBestGuessProber = None
def get_charset_name(self):
if not self._mBestGuessProber:
self.get_confidence()
if not self._mBestGuessProber:
return None
# self._mBestGuessProber = self._mProbers[0]
return self._mBestGuessProber.get_charset_name()
def feed(self, aBuf):
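        # Feed the buffer to every still-active prober: a definitive hit
        # (eFoundIt) short-circuits and records the winner, eNotMe deactivates
        # that prober, and if every prober drops out the group reports eNotMe.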
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
continue
st = prober.feed(aBuf)
if not st:
continue
if st == constants.eFoundIt:
self._mBestGuessProber = prober
return self.get_state()
elif st == constants.eNotMe:
prober.active = False
self._mActiveNum -= 1
if self._mActiveNum <= 0:
self._mState = constants.eNotMe
return self.get_state()
return self.get_state()
def get_confidence(self):
st = self.get_state()
if st == constants.eFoundIt:
return 0.99
elif st == constants.eNotMe:
return 0.01
bestConf = 0.0
self._mBestGuessProber = None
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
if constants._debug:
sys.stderr.write(prober.get_charset_name()
+ ' not active\n')
continue
cf = prober.get_confidence()
if constants._debug:
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(), cf))
if bestConf < cf:
bestConf = cf
self._mBestGuessProber = prober
if not self._mBestGuessProber:
return 0.0
return bestConf
# else:
# self._mBestGuessProber = self._mProbers[0]
# return self._mBestGuessProber.get_confidence()
| gpl-3.0 |
drinkssu/YourVoiceAlarmBackend | lib/flask/wrappers.py | 773 | 6709 | # -*- coding: utf-8 -*-
"""
flask.wrappers
~~~~~~~~~~~~~~
Implements the WSGI wrappers (request and response).
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
from werkzeug.exceptions import BadRequest
from .debughelpers import attach_enctype_error_multidict
from . import json
from .globals import _request_ctx_stack
_missing = object()
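# Compatibility note (editor addition): Werkzeug 0.9 introduced
# ``Request.get_data(cache=...)``; this helper prefers it and falls back to
# the eager ``.data`` attribute on older Werkzeug releases.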
def _get_data(req, cache):
getter = getattr(req, 'get_data', None)
if getter is not None:
return getter(cache=cache)
return req.data
class Request(RequestBase):
"""The request object used by default in Flask. Remembers the
matched endpoint and view arguments.
It is what ends up as :class:`~flask.request`. If you want to replace
the request object used you can subclass this and set
:attr:`~flask.Flask.request_class` to your subclass.
The request object is a :class:`~werkzeug.wrappers.Request` subclass and
provides all of the attributes Werkzeug defines plus a few Flask
specific ones.
"""
#: the internal URL rule that matched the request. This can be
#: useful to inspect which methods are allowed for the URL from
#: a before/after handler (``request.url_rule.methods``) etc.
#:
#: .. versionadded:: 0.6
url_rule = None
#: a dict of view arguments that matched the request. If an exception
#: happened when matching, this will be `None`.
view_args = None
#: if matching the URL failed, this is the exception that will be
#: raised / was raised as part of the request handling. This is
#: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
#: something similar.
routing_exception = None
# switched by the request context until 1.0 to opt in deprecated
# module functionality
_is_old_module = False
@property
def max_content_length(self):
"""Read-only view of the `MAX_CONTENT_LENGTH` config key."""
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.app.config['MAX_CONTENT_LENGTH']
@property
def endpoint(self):
"""The endpoint that matched the request. This in combination with
:attr:`view_args` can be used to reconstruct the same or a
modified URL. If an exception happened when matching, this will
be `None`.
"""
if self.url_rule is not None:
return self.url_rule.endpoint
@property
def module(self):
"""The name of the current module if the request was dispatched
to an actual module. This is deprecated functionality, use blueprints
instead.
"""
from warnings import warn
warn(DeprecationWarning('modules were deprecated in favor of '
'blueprints. Use request.blueprint '
'instead.'), stacklevel=2)
if self._is_old_module:
return self.blueprint
@property
def blueprint(self):
"""The name of the current blueprint"""
if self.url_rule and '.' in self.url_rule.endpoint:
return self.url_rule.endpoint.rsplit('.', 1)[0]
@property
def json(self):
"""If the mimetype is `application/json` this will contain the
parsed JSON data. Otherwise this will be `None`.
The :meth:`get_json` method should be used instead.
"""
# XXX: deprecate property
return self.get_json()
def get_json(self, force=False, silent=False, cache=True):
"""Parses the incoming JSON request data and returns it. If
parsing fails the :meth:`on_json_loading_failed` method on the
request object will be invoked. By default this function will
only load the json data if the mimetype is ``application/json``
        but this can be overridden by the `force` parameter.
:param force: if set to `True` the mimetype is ignored.
        :param silent: if set to `True` this method will fail silently
                       and return `None`.
:param cache: if set to `True` the parsed JSON data is remembered
on the request.
"""
rv = getattr(self, '_cached_json', _missing)
if rv is not _missing:
return rv
if self.mimetype != 'application/json' and not force:
return None
# We accept a request charset against the specification as
# certain clients have been using this in the past. This
# fits our general approach of being nice in what we accept
# and strict in what we send out.
request_charset = self.mimetype_params.get('charset')
try:
data = _get_data(self, cache)
if request_charset is not None:
rv = json.loads(data, encoding=request_charset)
else:
rv = json.loads(data)
except ValueError as e:
if silent:
rv = None
else:
rv = self.on_json_loading_failed(e)
if cache:
self._cached_json = rv
return rv
def on_json_loading_failed(self, e):
"""Called if decoding of the JSON data failed. The return value of
this method is used by :meth:`get_json` when an error occurred. The
default implementation just raises a :class:`BadRequest` exception.
.. versionchanged:: 0.10
Removed buggy previous behavior of generating a random JSON
response. If you want that behavior back you can trivially
add it by subclassing.
.. versionadded:: 0.8
"""
raise BadRequest()
def _load_form_data(self):
RequestBase._load_form_data(self)
# in debug mode we're replacing the files multidict with an ad-hoc
# subclass that raises a different error for key errors.
ctx = _request_ctx_stack.top
if ctx is not None and ctx.app.debug and \
self.mimetype != 'multipart/form-data' and not self.files:
attach_enctype_error_multidict(self)
class Response(ResponseBase):
"""The response object that is used by default in Flask. Works like the
response object from Werkzeug but is set to have an HTML mimetype by
default. Quite often you don't have to create this object yourself because
:meth:`~flask.Flask.make_response` will take care of that for you.
If you want to replace the response object used you can subclass this and
set :attr:`~flask.Flask.response_class` to your subclass.
"""
default_mimetype = 'text/html'
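# --- Illustrative sketch (editor addition, not part of Flask): exercising
# Request.get_json() without a running application, via Werkzeug's test
# helpers.
#
#     from werkzeug.test import EnvironBuilder
#     builder = EnvironBuilder(method='POST', data='{"answer": 42}',
#                              content_type='application/json')
#     req = Request(builder.get_environ())
#     assert req.get_json() == {'answer': 42}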
| apache-2.0 |
gabrielaraujof/beets | beetsplug/permissions.py | 24 | 3104 | from __future__ import (division, absolute_import, print_function,
unicode_literals)
"""Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:
permissions:
file: 644
dir: 755
"""
import os
from beets import config, util
from beets.plugins import BeetsPlugin
from beets.util import ancestry
def convert_perm(perm):
"""If the perm is a int it will first convert it to a string and back
to an oct int. Else it just converts it to oct.
"""
if isinstance(perm, int):
return int(bytes(perm), 8)
else:
return int(perm, 8)
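# Worked examples (editor addition).  Both branches parse the digits as
# octal, so under Python 2 (where ``bytes`` is an alias for ``str``):
#
#     >>> convert_perm(644)      # int -> '644' -> parsed base 8
#     420                        # == 0o644
#     >>> convert_perm('755')
#     493                        # == 0o755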
def check_permissions(path, permission):
"""Checks the permissions of a path.
"""
return oct(os.stat(path).st_mode & 0o777) == oct(permission)
def dirs_in_library(library, item):
"""Creates a list of ancestor directories in the beets library path.
"""
return [ancestor
for ancestor in ancestry(item)
if ancestor.startswith(library)][1:]
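# Worked example (editor addition), assuming beets.util.ancestry() yields
# every ancestor directory from the filesystem root downwards:
#
#     dirs_in_library('/music', '/music/artist/album/track.mp3')
#     # -> ['/music/artist', '/music/artist/album']
#     # (the [1:] slice drops the library root itself)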
class Permissions(BeetsPlugin):
def __init__(self):
super(Permissions, self).__init__()
# Adding defaults.
self.config.add({
u'file': 644,
u'dir': 755
})
self.register_listener('item_imported', permissions)
self.register_listener('album_imported', permissions)
def permissions(lib, item=None, album=None):
"""Running the permission fixer.
"""
# Getting the config.
file_perm = config['permissions']['file'].get()
dir_perm = config['permissions']['dir'].get()
# Converts permissions to oct.
file_perm = convert_perm(file_perm)
dir_perm = convert_perm(dir_perm)
# Create chmod_queue.
file_chmod_queue = []
if item:
file_chmod_queue.append(item.path)
elif album:
for album_item in album.items():
file_chmod_queue.append(album_item.path)
# A set of directories to change permissions for.
dir_chmod_queue = set()
for path in file_chmod_queue:
# Changing permissions on the destination file.
os.chmod(util.bytestring_path(path), file_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), file_perm):
message = 'There was a problem setting permission on {}'.format(
path)
print(message)
# Adding directories to the directory chmod queue.
dir_chmod_queue.update(
dirs_in_library(lib.directory,
path))
# Change permissions for the directories.
for path in dir_chmod_queue:
        # Changing permissions on the destination directory.
os.chmod(util.bytestring_path(path), dir_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), dir_perm):
message = 'There was a problem setting permission on {}'.format(
path)
print(message)
| mit |
thaim/ansible | test/units/modules/source_control/test_bitbucket_access_key.py | 37 | 13671 | from ansible.module_utils.source_control.bitbucket import BitbucketHelper
from ansible.modules.source_control.bitbucket import bitbucket_access_key
from units.compat import unittest
from units.compat.mock import patch
from units.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args
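# Harness note (editor addition): set_module_args() injects the would-be task
# parameters as the module's input, and ModuleTestCase patches exit_json() /
# fail_json() to raise AnsibleExitJson / AnsibleFailJson, so each test asserts
# on a module run by catching those exceptions.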
class TestBucketAccessKeyModule(ModuleTestCase):
def setUp(self):
super(TestBucketAccessKeyModule, self).setUp()
self.module = bitbucket_access_key
def test_missing_key_with_present_state(self):
with self.assertRaises(AnsibleFailJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'label': 'key name',
'state': 'present',
})
self.module.main()
self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_key'])
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
def test_create_deploy_key(self, *args):
with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'key': 'public_key',
'label': 'key name',
'state': 'present',
})
self.module.main()
self.assertEqual(create_deploy_key_mock.call_count, 1)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
def test_create_deploy_key_check_mode(self, *args):
with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'key': 'public_key',
'label': 'key name',
'state': 'present',
'_ansible_check_mode': True,
})
self.module.main()
self.assertEqual(create_deploy_key_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
"id": 123,
"label": "mykey",
"created_on": "2019-03-23T10:15:21.517377+00:00",
"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
"type": "deploy_key",
"comment": "",
"last_used": None,
"repository": {
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
},
"html": {
"href": "https://bitbucket.org/mleu/test"
},
"avatar": {
"href": "..."
}
},
"type": "repository",
"name": "test",
"full_name": "mleu/test",
"uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
}
},
})
def test_update_deploy_key(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'key': 'new public key',
'label': 'mykey',
'state': 'present',
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 1)
self.assertEqual(create_deploy_key_mock.call_count, 1)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
"id": 123,
"label": "mykey",
"created_on": "2019-03-23T10:15:21.517377+00:00",
"key": "new public key",
"type": "deploy_key",
"comment": "",
"last_used": None,
"repository": {
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
},
"html": {
"href": "https://bitbucket.org/mleu/test"
},
"avatar": {
"href": "..."
}
},
"type": "repository",
"name": "test",
"full_name": "mleu/test",
"uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
}
},
})
def test_dont_update_same_value(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'key': 'new public key',
'label': 'mykey',
'state': 'present',
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 0)
self.assertEqual(create_deploy_key_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], False)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
"id": 123,
"label": "mykey",
"created_on": "2019-03-23T10:15:21.517377+00:00",
"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
"type": "deploy_key",
"comment": "",
"last_used": None,
"repository": {
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
},
"html": {
"href": "https://bitbucket.org/mleu/test"
},
"avatar": {
"href": "..."
}
},
"type": "repository",
"name": "test",
"full_name": "mleu/test",
"uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
}
},
})
def test_update_deploy_key_check_mode(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'key': 'new public key',
'label': 'mykey',
'state': 'present',
'_ansible_check_mode': True,
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 0)
self.assertEqual(create_deploy_key_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
"id": 123,
"label": "mykey",
"created_on": "2019-03-23T10:15:21.517377+00:00",
"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
"type": "deploy_key",
"comment": "",
"last_used": None,
"repository": {
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
},
"html": {
"href": "https://bitbucket.org/mleu/test"
},
"avatar": {
"href": "..."
}
},
"type": "repository",
"name": "test",
"full_name": "mleu/test",
"uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
}
},
})
def test_delete_deploy_key(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'label': 'mykey',
'state': 'absent',
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 1)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
def test_delete_absent_deploy_key(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'label': 'mykey',
'state': 'absent',
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], False)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
"id": 123,
"label": "mykey",
"created_on": "2019-03-23T10:15:21.517377+00:00",
"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
"type": "deploy_key",
"comment": "",
"last_used": None,
"repository": {
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
},
"html": {
"href": "https://bitbucket.org/mleu/test"
},
"avatar": {
"href": "..."
}
},
"type": "repository",
"name": "test",
"full_name": "mleu/test",
"uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
}
},
})
def test_delete_deploy_key_check_mode(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'label': 'mykey',
'state': 'absent',
'_ansible_check_mode': True,
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
if __name__ == '__main__':
unittest.main()
| mit |
wangjun/wakatime | wakatime/packages/requests/packages/chardet/universaldetector.py | 1776 | 6840 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2122, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
            # (editor fix: CharSetGroupProber stores its children in
            # ``_mProbers``; the original ``.mProbers`` would raise
            # AttributeError in debug mode)
            for prober in self._mCharSetProbers[0]._mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
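# --- Illustrative usage (editor addition): the canonical detection loop.
#
#     detector = UniversalDetector()
#     with open('some_file.bin', 'rb') as f:
#         for chunk in f:
#             detector.feed(chunk)
#             if detector.done:
#                 break
#     detector.close()
#     print(detector.result)  # e.g. {'encoding': 'ascii', 'confidence': 1.0}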
| bsd-3-clause |
SoundGoof/NIPAP | tests/nipapbase.py | 7 | 39082 | #!/usr/bin/env python
# vim: et :
import logging
import unittest
import sys
sys.path.insert(0, '../nipap/')
from nipap.backend import Nipap, NipapError, NipapInputError, NipapMissingInputError, NipapExtraneousInputError, NipapValueError, NipapDuplicateError, NipapNonExistentError
from nipap.authlib import SqliteAuth
from nipap.nipapconfig import NipapConfig
class NipapTest(unittest.TestCase):
""" Tests the NIPAP class
"""
nipap = None
def setUp(self):
""" Better start from a clean slate!
"""
cfg = NipapConfig('/etc/nipap/nipap.conf')
self.nipap = Nipap()
# create dummy auth object
# As the authentication is performed before the query hits the Nipap
# class, it does not matter what user we use here
self.auth = SqliteAuth('local', 'unittest', 'unittest', 'unittest')
self.auth.authenticated_as = 'unittest'
self.auth.full_name = 'Unit test'
self.nipap._execute("TRUNCATE ip_net_plan, ip_net_pool, ip_net_vrf, ip_net_log, ip_net_asn")
self.schema_attrs = {
'name': 'test-schema1',
'description': 'Test schema numero uno!'
}
self.schema_attrs['id'] = self.nipap.add_schema(self.auth, self.schema_attrs)
self.schema_attrs2 = {
'name': 'test-schema2',
'description': 'Test schema numero dos!'
}
self.schema_attrs2['id'] = self.nipap.add_schema(self.auth, self.schema_attrs2)
self.pool_attrs = {
'name': 'test-pool1',
'description': 'Test pool numero uno!',
'default_type': 'assignment',
'ipv4_default_prefix_length': 30,
'ipv6_default_prefix_length': 112
}
self.pool_attrs['id'] = self.nipap.add_pool(self.auth, {'id': self.schema_attrs['id']}, self.pool_attrs)
self.prefix_attrs1 = {
'authoritative_source': 'nipaptest',
'prefix': '1.3.3.0/24',
'type': 'assignment',
'description': ''
}
self.prefix_attrs1['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix_attrs1)
self.prefix_attrs = {
'authoritative_source': 'nipaptest',
'prefix': '1.3.3.1/32',
'type': 'host',
'description': 'Test prefix numero uno!'
}
self.prefix_attrs['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix_attrs)
self.prefix_attrs2 = {
'authoritative_source': 'nipaptest',
'prefix': '1.3.2.0/23',
'type': 'reservation',
'description': ''
}
self.prefix_attrs2['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix_attrs2)
self.prefix_attrs3 = {
'authoritative_source': 'nipaptest',
'prefix': '1.3.0.0/16',
'type': 'reservation',
'description': ''
}
self.prefix_attrs3['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix_attrs3)
self.prefix_attrs4 = {
'authoritative_source': 'nipaptest',
'prefix': '1.3.0.0/17',
'type': 'reservation',
'description': ''
}
self.prefix_attrs4['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs2['id']}, self.prefix_attrs4)
self.prefix6_attrs1 = {
'authoritative_source': 'nipaptest',
'prefix': '2001:0db8:3:3::/112',
'type': 'assignment',
'description': ''
}
self.prefix6_attrs1['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix6_attrs1)
self.prefix6_attrs = {
'authoritative_source': 'nipaptest',
'prefix': '2001:0db8:3:3::1/128',
'type': 'host',
'description': 'Test prefix numero uno!'
}
self.prefix6_attrs['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix6_attrs)
self.prefix6_attrs2 = {
'authoritative_source': 'nipaptest',
'prefix': '2001:0db8:3:3::/64',
'type': 'reservation',
'description': ''
}
self.prefix6_attrs2['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix6_attrs2)
self.prefix6_attrs3 = {
'authoritative_source': 'nipaptest',
'prefix': '2001:0db8:3:0::/48',
'type': 'reservation',
'description': ''
}
self.prefix6_attrs3['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix6_attrs3)
self.prefix6_attrs4 = {
'authoritative_source': 'nipaptest',
'prefix': '2001:0db8:3:0::/56',
'type': 'reservation',
'description': ''
}
self.prefix6_attrs4['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs2['id']}, self.prefix6_attrs4)
def test_schema_basic(self):
""" Basic schema test
1. Add a new schema
2. List with filters to get newly created schema
3. Verify listed schema coincides with input args for added schema
4. Remove schema
"""
attrs = {
'name': 'test-schema-wrong',
'description': 'A simple test schema with incorrect name!'
}
attrs['id'] = self.nipap.add_schema(self.auth, attrs)
schema = self.nipap.list_schema(self.auth, { 'id': attrs['id'] })
for a in attrs:
self.assertEqual(schema[0][a], attrs[a], 'Added object differ from listed on attribute: ' + a)
def test_schema_add_crap_input(self):
""" Try to input junk into add_schema and expect error
"""
attrs = {
'name': 'test-schema-crap',
'description': 'A simple test schema with incorrect name!',
'crap': 'this is just some crap'
}
# missing everything
self.assertRaises(NipapMissingInputError, self.nipap.add_schema, self.auth, { })
# missing description
self.assertRaises(NipapMissingInputError, self.nipap.add_schema, self.auth, { 'name': 'crapson' })
# have required and extra crap
self.assertRaises(NipapExtraneousInputError, self.nipap.add_schema, self.auth, attrs)
def test_expand_schema_spec(self):
""" Test the expand_schema_spec()
The _expand_schema_spec() function is used throughout the schema
            functions to expand the schema specification input, so we test it
            separately.
"""
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_schema_spec, 'string')
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_schema_spec, 1)
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_schema_spec, [])
# missing keys
self.assertRaises(NipapMissingInputError, self.nipap._expand_schema_spec, { })
# crap key
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_schema_spec, { 'crap': self.schema_attrs['name'] })
# required keys and extra crap
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_schema_spec, { 'name': self.schema_attrs['name'], 'crap': 'crap' })
# proper key but incorrect value (int vs string)
self.assertRaises(NipapValueError, self.nipap._expand_schema_spec, { 'id': '3' })
# proper key but incorrect value (int vs string)
self.assertRaises(NipapValueError, self.nipap._expand_schema_spec, { 'name': 3 })
# both id and name
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_schema_spec, { 'id': 3, 'name': '3' })
# proper key - id
where, params = self.nipap._expand_schema_spec({ 'id': 3 })
self.assertEqual(where, 'id = %(spec_id)s', "Improperly expanded WHERE clause")
self.assertEqual(params, {'spec_id': 3}, "Improperly expanded params dict")
        # proper spec - name
        where, params = self.nipap._expand_schema_spec({ 'name': 'test' })
        # (editor addition: the original test computed where/params but never
        # asserted on them; the expected strings below follow the pattern of
        # the id case above and are an assumption)
        self.assertEqual(where, 'name = %(spec_name)s', "Improperly expanded WHERE clause")
        self.assertEqual(params, {'spec_name': 'test'}, "Improperly expanded params dict")
def test_schema_edit_crap_input(self):
""" Try to input junk into edit_schema and expect error
"""
attrs = {
'name': 'test-schema-crap',
'description': 'A simple test schema with incorrect name!'
}
crap_attrs = {
'name': 'test-schema-crap',
'description': 'A simple test schema with incorrect name!',
'crap': 'this is just some crap'
}
# spec is tested elsewhere, just test attrs part
self.assertRaises(NipapExtraneousInputError, self.nipap.edit_schema, self.auth, { 'name': self.schema_attrs['name'] }, crap_attrs)
def test_schema_list_crap_input(self):
""" Try to input junk into list_schema and expect error
"""
# TODO: what do we really expect?
self.assertRaises(NipapExtraneousInputError, self.nipap.list_schema, self.auth, { 'crap': 'crap crap' })
def test_schema_dupe(self):
""" Check so we can't create duplicate schemas
There are unique indices in the database that should prevent us
from creating duplicate schema (ie, with the same name).
"""
schema_attrs = {
'name': 'test-schema-dupe',
'description': 'Testing dupe'
}
self.nipap.add_schema(self.auth, schema_attrs)
self.assertRaises(NipapDuplicateError, self.nipap.add_schema, self.auth, schema_attrs)
def test_schema_rename(self):
""" Rename a schema
Uses the edit_schema() functionality to rename our previously
created and incorrectly named schema so it hereafter has the
correct name. Also tests the list_schema() functionality since we
use that to list the modified schema.
"""
spec = { 'name': 'test-schema1' }
attrs = {
'name': 'test-schema',
'description': 'A simple test schema with correct name!'
}
self.nipap.edit_schema(self.auth, spec, attrs)
# check that search for old record doesn't return anything
schema = self.nipap.list_schema(self.auth, spec)
self.assertEqual(schema, [], 'Old entry still exists')
schema = self.nipap.list_schema(self.auth, { 'name': 'test-schema' })
for a in attrs:
self.assertEqual(schema[0][a], attrs[a], 'Modified schema differ from listed on attribute: ' + a)
def test_schema_remove(self):
""" Remove a schema
Remove the schema previously modified and make sure it's not there.
"""
spec = { 'name': 'test-schema' }
self.nipap.remove_schema(self.auth, spec)
# check that search for old record doesn't return anything
schema = self.nipap.list_schema(self.auth, spec)
self.assertEqual(schema, [], 'Old entry still exists')
def test_expand_pool_spec(self):
""" Test the function which expands pool spec to SQL.
"""
schema = {'id': self.schema_attrs['id']}
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_pool_spec, 'string')
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_pool_spec, 1)
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_pool_spec, [])
# missing keys
self.assertRaises(NipapMissingInputError, self.nipap._expand_pool_spec, { })
# crap key
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_pool_spec, { 'crap': self.pool_attrs['name'] })
# required keys and extra crap
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_pool_spec, { 'id': self.pool_attrs['id'], 'schema': self.schema_attrs['id'], 'crap': 'crap' })
# proper key but incorrect value (int vs string)
self.assertRaises(NipapValueError, self.nipap._expand_pool_spec, { 'id': '3', 'schema': self.schema_attrs['id'] })
# proper key but incorrect value (int vs string)
self.assertRaises(NipapValueError, self.nipap._expand_pool_spec, { 'name': 3, 'schema': self.schema_attrs['id'] })
# both id and name
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_pool_spec, { 'id': 3, 'name': '3', 'schema': self.schema_attrs['id'] })
# proper key - id
where, params = self.nipap._expand_pool_spec({ 'id': 3, 'schema': self.schema_attrs['id'] })
self.assertEqual(where, 'po.id = %(spec_id)s AND po.schema = %(spec_schema)s', "Improperly expanded WHERE clause")
self.assertEqual(params, {'spec_id': 3, 'spec_schema': self.schema_attrs['id']}, "Improperly expanded params dict")
# proper spec - name
where, params = self.nipap._expand_pool_spec({ 'name': 'test', 'schema': self.schema_attrs['id'] })
self.assertEqual(where, 'po.name = %(spec_name)s AND po.schema = %(spec_schema)s', "Improperly expanded WHERE clause")
self.assertEqual(params, {'spec_name': 'test', 'spec_schema': self.schema_attrs['id'] }, "Improperly expanded params dict")
def test_pool_add1(self):
""" Add a pool and check it's there using list functions
Refer to schema by id
"""
attrs = {
'name': 'test-pool-wrong',
'description': 'A simple test pool with incorrect name!',
'default_type': 'reservation',
'ipv4_default_prefix_length': 30,
'ipv6_default_prefix_length': 112
}
schema = {'id': self.schema_attrs['id']}
pool_id = self.nipap.add_pool(self.auth, schema, attrs)
pool = self.nipap.list_pool(self.auth, schema, { 'id': pool_id })
for a in attrs:
self.assertEqual(pool[0][a], attrs[a], 'Added object differ from listed on attribute: %s %s!=%s' % (a, attrs[a], pool[0][a]))
def test_pool_add2(self):
""" Add a pool and check it's there using list functions
Refer to schema by name
"""
schema = {'id': self.schema_attrs['id']}
attrs = {
'name': 'test-pool-wrong',
'default_type': 'reservation',
'description': 'A simple test pool with incorrect name!'
}
pool_id = self.nipap.add_pool(self.auth, schema, attrs)
pool = self.nipap.list_pool(self.auth, schema, { 'id': pool_id })
for a in attrs:
self.assertEqual(pool[0][a], attrs[a], 'Added object differ from listed on attribute: ' + a)
def test_edit_pool_by_name(self):
""" Try to rename a pool using edit_pool() function
Pool is not uniquely identified (empty spec) so this should raise an error
"""
schema = {'id': self.schema_attrs['id']}
spec = { }
attrs = {
'name': self.pool_attrs['name'],
'default_type': 'assignment',
'description': 'A simple test pool with correct name!'
}
self.assertRaises(NipapInputError, self.nipap.edit_pool, self.auth, schema, spec, attrs)
def test_edit_pool(self):
""" Rename a pool using edit_pool() function
"""
schema = {'id': self.schema_attrs['id']}
spec = { 'id': self.pool_attrs['id'] }
attrs = {
'name': 'test-pool',
'default_type': 'assignment',
'description': 'A simple test pool with correct name!',
'ipv4_default_prefix_length': 32,
'ipv6_default_prefix_length': 128
}
self.nipap.edit_pool(self.auth, schema, spec, attrs)
# check that search for old record doesn't return anything
pool = self.nipap.list_pool(self.auth, schema, { 'name': self.pool_attrs['name'] })
self.assertEqual(pool, [], 'Old entry still exists')
pool = self.nipap.list_pool(self.auth, schema, { 'name': attrs['name'] })
for a in attrs:
self.assertEqual(pool[0][a], attrs[a], 'Added object differ from listed on attribute: ' + a)
def test_remove_pool_by_id(self):
""" Remove a pool by id
"""
schema = {'id': self.schema_attrs['id']}
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
# first make sure our pool exists
self.assertNotEqual(pool[0], [], 'Record must exist before we can delete it')
for a in self.pool_attrs:
self.assertEqual(pool[0][a], self.pool_attrs[a], 'Listed attribute differ from original')
# remove the pool
self.nipap.remove_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
# check that search for old record doesn't return anything
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
self.assertEqual(pool, [], 'Old entry still exists')
def test_prefix_in_a_pool(self):
""" Add prefixes to a poll and list!
"""
schema = {'id': self.schema_attrs['id']}
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
# first make sure our pool exists
self.assertNotEqual(pool[0], [], 'Pool must exist!')
pfxs = [
'1.2.2.0/32',
'1.2.2.1/32',
'1.2.2.2/32',
'1.2.2.3/32',
'1.2.2.4/32',
'1.2.2.5/32'
]
for p in pfxs:
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': p,
'type': 'host',
'description': 'test prefix',
'pool_id': self.pool_attrs['id'],
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
# list again
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
self.assertNotEqual(pool[0], [], 'Pool must exist!')
self.assertEqual(set(pfxs), set(pool[0]['prefixes']), 'Returned prefixes do not match added ones')
def test_prefix_basic(self):
""" Test basic prefix functions
"""
schema = {'id': self.schema_attrs['id']}
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': '1.3.3.7/32',
'type': 'host',
'description': 'test prefix',
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
prefix = self.nipap.list_prefix(self.auth, schema, { 'prefix': prefix_attrs['prefix'] })
for a in prefix_attrs:
self.assertEqual(prefix[0][a], prefix_attrs[a], 'Added object differ from listed on attribute: ' + a)
# fetch many prefixes - all in a schema
prefix = self.nipap.list_prefix(self.auth, schema, {})
self.assertNotEqual(len(prefix), 0, 'Found 0 prefixes in schema ' + self.schema_attrs['name'])
def test_add_prefix(self):
""" Test add_prefix in a bit more detail
"""
schema = {'id': self.schema_attrs['id']}
# we need a bloody pool first!
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
# first make sure our pool exists
self.assertNotEqual(pool[0], [], 'Pool must exist!')
pfxs = [
'10.0.0.0/24',
'10.0.1.0/24',
'10.0.2.0/24',
'10.0.3.0/24',
'10.0.4.0/24'
]
for p in pfxs:
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': p,
'type': 'reservation',
'description': 'test prefix',
'pool_id': self.pool_attrs['id'],
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
# get an address based on from-prefix
prefix_attrs = {
'type': 'assignment',
'authoritative_source': 'nipap-test',
'description': 'test prefix',
'comment': 'test comment, please remove! ;)'
}
res = self.nipap.add_prefix(self.auth, schema, prefix_attrs, { 'from-prefix': ['10.0.0.0/24'], 'prefix_length': 30 })
p = self.nipap.list_prefix(self.auth, schema, { 'id': res })
self.assertEqual(p[0]['prefix'], '10.0.0.0/30', "New prefix differ from what it should be!")
self.nipap.add_schema(self.auth, { 'name': 'testtest', 'description': 'another test schema!' })
# pass different schemas in attr and args
# TODO: Find something similar?
#self.assertRaises(NipapInputError, self.nipap.add_prefix, schema, { 'authoritative_source': 'nipap-test', 'description': 'tjong' }, { 'from-prefix': ['10.0.0.0/24'], 'prefix_length': 30 })
def test_prefix_search_simple(self):
""" Test the simple prefix search function.
"""
schema = {'id': self.schema_attrs['id']}
# First, perform e few tests to verify search string expansion.
query_keys = dict()
query_keys['testing testing'] = "description"
query_keys['1.2.3.4'] = "prefix"
# build query string
query_str = ""
for key, val in query_keys.items():
if val == "description":
query_str += "\"%s\" " % key
else:
query_str += "%s " % key
res = self.nipap.smart_search_prefix(self.auth, schema, query_str)
for interp in res['interpretation']:
self.assertEqual(interp['string'] in query_keys, True, "Function returned unknown interpreted string %s" % interp['string'])
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': '1.3.3.77/32',
'type': 'host',
'description': 'test-ish prefix',
'comment': 'Test prefix #77! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
res = self.nipap.smart_search_prefix(self.auth, schema, r"""1.3.3.77 "-ish" """)
self.assertEqual(res['result'][-1]['prefix'], '1.3.3.77/32', 'Prefix not found')
def test_prefix_search_smart(self):
""" Test the smart prefix search function.
"""
schema = {'id': self.schema_attrs['id']}
# test full ipv4 address
res = self.nipap.smart_search_prefix(self.auth, schema, '1.3.3.7')
self.assertEqual(res['interpretation'][0]['interpretation'], 'IPv4 address')
res = self.nipap.smart_search_prefix(self.auth, schema, '1.1')
self.assertEqual(res['interpretation'][0]['interpretation'], 'text', "Incorrectly interpreted '1.1' as : " + res['interpretation'][0]['interpretation'])
res = self.nipap.smart_search_prefix(self.auth, schema, '10/8')
self.assertEqual(res['interpretation'][0]['interpretation'], 'IPv4 prefix')
res = self.nipap.smart_search_prefix(self.auth, schema, '2000:0::01')
self.assertEqual(res['interpretation'][0]['interpretation'], 'IPv6 address')
def test_prefix_remove(self):
""" Remove a prefix
"""
schema = {'id': self.schema_attrs['id']}
prefix = self.nipap.list_prefix(self.auth, schema, { 'id': self.prefix_attrs['id'] })
# first make sure our prefix exists
self.assertEqual(prefix[0]['id'], self.prefix_attrs['id'], 'Record must exist before we can delete it')
# remove the prefix, by id
self.nipap.remove_prefix(self.auth, schema, { 'id': self.prefix_attrs['id'] })
# check that search for old record doesn't return anything
prefix = self.nipap.list_prefix(self.auth, schema, { 'id': self.prefix_attrs['id'] })
self.assertEqual(prefix, [], 'Old entry still exists')
def test_prefix_indent_ipv4(self):
""" Check that our indentation calculation is working for IPv4
Prefixes gets an indent value automatically assigned to help in
displaying prefix information. The indent value is written on
updates to the table and this test is to make sure it is correctly
calculated.
"""
schema = {'id': self.schema_attrs['id']}
p1 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '1.3.3.1/32' })[0]
p2 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '1.3.3.0/24' })[0]
p3 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '1.3.0.0/16' })[0]
self.assertEqual(p1['indent'], 4, "Indent calc on add failed")
self.assertEqual(p2['indent'], 3, "Indent calc on add failed")
self.assertEqual(p3['indent'], 0, "Indent calc on add failed")
# remove middle prefix
self.nipap.remove_prefix(self.auth, schema, { 'id': self.prefix_attrs2['id'] })
# check that child prefix indent level has decreased
p1 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '1.3.3.1/32' })[0]
p3 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '1.3.0.0/16' })[0]
self.assertEqual(p1['indent'], 3, "Indent calc on remove failed")
self.assertEqual(p3['indent'], 0, "Indent calc on remove failed")
def test_prefix_indent_ipv6(self):
""" Check that our indentation calculation is working for IPv6
Prefixes gets an indent value automatically assigned to help in
displaying prefix information. The indent value is written on
updates to the table and this test is to make sure it is correctly
calculated.
"""
schema = {'id': self.schema_attrs['id']}
p1 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '2001:0db8:3:3::1/128' })[0]
p2 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '2001:0db8:3:3::/64' })[0]
p3 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '2001:0db8:3:0::/48' })[0]
self.assertEqual(p1['indent'], 4, "Indent calc on add failed")
self.assertEqual(p2['indent'], 2, "Indent calc on add failed")
self.assertEqual(p3['indent'], 0, "Indent calc on add failed")
# remove middle prefix
self.nipap.remove_prefix(self.auth, schema, { 'id': self.prefix6_attrs2['id'] })
# check that child prefix indent level has decreased
p1 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '2001:0db8:3:3::1/128' })[0]
p3 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '2001:0db8:3:0::/48' })[0]
self.assertEqual(p1['indent'], 3, "Indent calc on remove failed for " + p1['prefix'] + " indent: " + str(p1['indent']))
self.assertEqual(p3['indent'], 0, "Indent calc on remove failed for " + p3['prefix'] + " indent: " + str(p3['indent']))
def test_find_free_prefix_input(self):
""" Mostly input testing of find_free_prefix
Try to stress find_free_prefix and send a lot of junk..
"""
schema = {'id': self.schema_attrs['id']}
# set up a prefix not used elsewhere so we have a known good state
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': '100.0.0.0/16',
'type': 'reservation',
'description': 'test prefix',
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
# no schema, should raise error!
self.assertRaises(NipapInputError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': ['100.0.0.0/16'] })
# incorrect from-prefix type, string instead of list of strings (looking like an IP address)
self.assertRaises(NipapInputError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': '100.0.0.0/16' })
# missing prefix_length
self.assertRaises(NipapMissingInputError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '100.0.0.0/16' ], 'count': 1 })
# try giving both IPv4 and IPv6 in from-prefix which shouldn't work
self.assertRaises(NipapInputError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '100.0.0.0/16', '2a00:800::0/25' ], 'prefix_length': 24, 'count': 1 })
# try giving non-integer as wanted prefix length
self.assertRaises(NipapValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '100.0.0.0/16'], 'prefix_length': '24', 'count': 1 })
        # try giving too high a number as wanted prefix length for IPv4
self.assertRaises(NipapValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '100.0.0.0/16'], 'prefix_length': 35, 'count': 1 })
        # try giving too high a number as wanted prefix length for IPv6
self.assertRaises(NipapValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '2a00:800::1/25'], 'prefix_length': 150, 'count': 1 })
# try giving a high number for result count (max is 1000)
self.assertRaises(NipapValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '100.0.0.0/16'], 'prefix_length': 30, 'count': 55555 })
# don't pass 'family', which is required when specifying 'from-pool'
self.assertRaises(NipapMissingInputError, self.nipap.find_free_prefix, self.auth, schema, { 'from-pool': { 'name': self.pool_attrs['name'] }, 'prefix_length': 24, 'count': 1 })
# pass crap as family, wrong type even
self.assertRaises(ValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-pool': { 'name': self.pool_attrs['name'] }, 'prefix_length': 24, 'count': 1, 'family': 'crap' })
# pass 7 as family
self.assertRaises(NipapValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-pool': { 'name': self.pool_attrs['name'] }, 'prefix_length': 24, 'count': 1, 'family': 7 })
# pass non existent pool
self.assertRaises(NipapNonExistentError, self.nipap.find_free_prefix, self.auth, schema, { 'from-pool': { 'name': 'crap' }, 'prefix_length': 24, 'count': 1, 'family': 4 })
def test_find_free_prefix1(self):
""" Functionality testing of find_free_prefix
Mostly based on 'from-prefix'
"""
schema = { 'id': self.schema_attrs['id'] }
# set up a prefix not used elsewhere so we have a known good state
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': '100.0.0.0/16',
'type': 'assignment',
'description': 'test prefix',
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
# simple test
res = self.nipap.find_free_prefix(self.auth, schema, { 'from-prefix': [ '100.0.0.0/16', '1.3.3.0/24' ], 'prefix_length': 24, 'count': 1 })
self.assertEqual(res, ['100.0.0.0/24'], "Incorrect prefix set returned")
# simple test - only one input prefix (which did cause a bug, thus keeping it)
res = self.nipap.find_free_prefix(self.auth, schema, { 'from-prefix': [ '100.0.0.0/16' ], 'prefix_length': 24, 'count': 1 })
self.assertEqual(res, ['100.0.0.0/24'], "Incorrect prefix set returned")
res = self.nipap.find_free_prefix(self.auth, schema, { 'from-prefix': [ '100.0.0.0/16', '1.3.3.0/24' ], 'prefix_length': 24, 'count': 999 })
self.assertEqual(len(res), 256, "Incorrect prefix set returned")
def test_find_free_prefix2(self):
""" Functionality testing of find_free_prefix
Mostly based on 'from-pool'
"""
schema = { 'id': self.schema_attrs['id'] }
# we need a bloody pool first!
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
# first make sure our pool exists
self.assertNotEqual(pool[0], [], 'Pool must exist!')
pfxs = [
'10.0.0.0/24',
'10.0.1.0/24',
'10.0.2.0/24',
'10.0.3.0/24',
'10.0.4.0/24'
]
for p in pfxs:
prefix_attrs = {
'type': 'reservation',
'authoritative_source': 'nipap-test',
'prefix': p,
'description': 'test prefix',
'pool_id': self.pool_attrs['id'],
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
# from-pool test
res = self.nipap.find_free_prefix(self.auth, schema, { 'from-pool': { 'name': self.pool_attrs['name'] }, 'count': 1, 'family': 4})
self.assertEqual(res, ['10.0.1.0/30'], "Incorrect prefix set returned when requesting default prefix-length")
# from-pool test, specify wanted prefix length
res = self.nipap.find_free_prefix(self.auth, schema, { 'from-pool': { 'name': self.pool_attrs['name'] }, 'count': 1, 'family': 4, 'prefix_length': 31})
self.assertEqual(res, ['10.0.1.0/31'], "Incorrect prefix set returned with explicit prefix-length")
def test_edit_prefix(self):
""" Functionality testing of edit_prefix.
"""
schema = { 'id': self.schema_attrs['id'] }
data = {
'prefix': '192.0.2.0/24',
'description': 'foo',
'comment': 'bar',
'order_id': '0xBEEF',
'customer_id': 'CUST-EEF-DERP',
'alarm_priority': 'low',
'type': 'assignment',
'node': 'TOK-CORE-1',
'country': 'EE',
'authoritative_source': 'unittest',
'pool': self.pool_attrs['id']
}
# basic edit
self.nipap.edit_prefix(self.auth, schema, { 'id': self.prefix_attrs['id'] }, data)
p = self.nipap.list_prefix(self.auth, schema, {'id': self.prefix_attrs['id']})[0]
        # remove what we did not touch
        # (editor fix: the original read ``for k, v in data.keys()`` -- which
        # cannot unpack plain keys -- and tested membership the wrong way round)
        for k in list(p.keys()):
            if k not in data:
                del p[k]
self.assertEqual(data, p, "Prefix data incorrect after edit.")
# create a collision
self.assertRaises(NipapError, self.nipap.edit_prefix, self.auth, schema, {'id': self.prefix_attrs2['id']}, {'prefix': data['prefix']})
# try to change schema - disallowed
        self.assertRaises(NipapExtraneousInputError, self.nipap.edit_prefix, self.auth, schema, {'id': self.prefix_attrs2['id']}, {'schema': self.schema_attrs2['id']})
def test_add_asn(self):
""" Test adding ASNs to NIPAP.
"""
data = {
'asn': 1,
'name': 'Test ASN #1'
}
self.assertEqual(self.nipap.add_asn(self.auth, data), 1, "add_asn did not return correct ASN.")
asn = self.nipap.list_asn(self.auth, { 'asn': 1 })[0]
self.assertEquals(data, asn, "ASN in database not equal to what was added.")
self.assertRaises(NipapDuplicateError, self.nipap.add_asn, self.auth, data)
def test_remove_asn(self):
""" Test removing ASNs from NIPAP.
"""
data = {
'asn': 2,
'name': 'Test ASN #2'
}
asn = self.nipap.add_asn(self.auth, data)
self.nipap.remove_asn(self.auth, asn)
self.assertEquals(0, len(self.nipap.list_asn(self.auth, { 'asn': 2 })), "Removed ASN still in database")
def test_edit_asn(self):
""" Test editing ASNs.
"""
data = {
'asn': 3,
'name': 'Test ASN #3'
}
asn = self.nipap.add_asn(self.auth, data)
self.nipap.edit_asn(self.auth, data['asn'], { 'name': 'b0rk' })
        self.assertEquals(self.nipap.list_asn(self.auth, { 'asn': 3 })[0]['name'], 'b0rk', "Edited ASN still has its old name.")
self.assertRaises(NipapExtraneousInputError, self.nipap.edit_asn, self.auth, {'asn': 3}, {'asn': 4, 'name': 'Test ASN #4'})
def test_search_asn(self):
""" Test searching ASNs.
"""
data = {
'asn': 4,
'name': 'This is AS number 4'
}
asn = self.nipap.add_asn(self.auth, data)
q = {
'operator': 'equals',
'val1': 'asn',
'val2': data['asn']
}
res = self.nipap.search_asn(self.auth, q)
self.assertEquals(len(res['result']), 1, "equal search resulted in wrong number of hits")
self.assertEquals(res['result'][0]['name'], data['name'], "search hit got wrong name")
q = {
'operator': 'regex_match',
'val1': 'name',
'val2': 'number'
}
res = self.nipap.search_asn(self.auth, q)
self.assertEquals(len(res['result']), 1, "regex search resulted in wrong number of hits")
self.assertEquals(res['result'][0]['asn'], data['asn'], "search hit got wrong asn")
def test_smart_search_asn(self):
""" Test smart_search_asn function.
"""
data = {
'asn': 5,
'name': 'Autonomous System Number 5'
}
asn = self.nipap.add_asn(self.auth, data)
res = self.nipap.smart_search_asn(self.auth, "Autonomous")
self.assertEquals(len(res['result']), 1, "search resulted in wrong number of hits")
self.assertEquals(res['result'][0]['asn'], data['asn'], "search hit got wrong asn")
        self.assertEquals(res['interpretation'][0]['attribute'], 'name', "search term interpreted as wrong type")
res = self.nipap.smart_search_asn(self.auth, "5")
self.assertEquals(len(res['result']), 1, "search resulted in wrong number of hits")
self.assertEquals(res['result'][0]['asn'], data['asn'], "search hit got wrong asn")
        self.assertEquals(res['interpretation'][0]['attribute'], 'asn', "search term interpreted as wrong type")
def main():
if sys.version_info >= (2,7):
unittest.main(verbosity=2)
else:
unittest.main()
if __name__ == '__main__':
log_format = "%(levelname)-8s %(message)s"
logging.basicConfig(format=log_format)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
main()
| mit |
torchingloom/django-registration | registration/forms.py | 2 | 3191 | """
Forms and validation code for user registration.
Note that all of these forms assume Django's bundle default ``User``
model; since it's not possible for a form to anticipate in advance the
needs of custom user models, you will need to write your own forms if
you're using a custom model.
"""
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import UserCreationForm
from registration.users import UserModel
class RegistrationForm(UserCreationForm):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should avoid defining a ``save()`` method -- the actual
saving of collected user data is delegated to the active
registration backend.
"""
required_css_class = 'required'
email = forms.EmailField(label=_("E-mail"))
class Meta:
model = UserModel()
fields = ("username", "email")
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput,
label=_('I have read and agree to the Terms of Service'),
error_messages={'required': _("You must agree to the terms to register")})
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
if UserModel().objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which disallows registration with
email addresses from popular free webmail services; moderately
useful for preventing automated spam registrations.
To change the list of banned domains, subclass this form and
override the attribute ``bad_domains``.
"""
bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
'googlemail.com', 'hotmail.com', 'hushmail.com',
'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
'yahoo.com']
def clean_email(self):
"""
Check the supplied email address against a list of known free
webmail domains.
"""
email_domain = self.cleaned_data['email'].split('@')[1]
if email_domain in self.bad_domains:
raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
return self.cleaned_data['email']
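# Illustrative sketch (not part of the original package): the variants above
# compose via multiple inheritance, so a form that enforces both unique email
# addresses and ToS agreement needs no extra code. The class name here is
# hypothetical.
class RegistrationFormUniqueEmailWithTOS(RegistrationFormTermsOfService,
                                         RegistrationFormUniqueEmail):
    """Registration form requiring a unique email and ToS agreement."""
    pass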
| bsd-3-clause |
codebox/algorithms | graph_data_structures.py | 1 | 1452 | class DirectedAdjacencyMatrix:
def __init__(self, n):
self.n = n
self.data = [[0] * n for i in range(n)]
def connect(self, i_from, i_to):
self.data[i_from][i_to] += 1
def disconnect(self, i_from, i_to):
self.data[i_from][i_to] = max(0, self.data[i_from][i_to] - 1)
def are_connected(self, n1, n2):
return self.data[n1][n2] > 0 or self.data[n2][n1] > 0
def get_outgoing(self, i_from):
return [i for i in range(self.n) if self.data[i_from][i]]
def get_incoming(self, i_to):
return [i for i in range(self.n) if self.data[i][i_to]]
class DirectedAdjacencyList:
def __init__(self, n):
self.n = n
self.vertices = range(n)
self.edges = []
def connect(self, i_from, i_to):
self.edges.append([i_from, i_to])
def disconnect(self, i_from, i_to):
for edge in self.edges:
if edge[0] == i_from and edge[1] == i_to:
self.edges.remove(edge)
break
def are_connected(self, n1, n2):
for edge in self.edges:
if (edge[0] == n1 and edge[1] == n2) or (edge[0] == n2 and edge[1] == n1):
return True
return False
def get_outgoing(self, i_from):
return list(set([edge[1] for edge in self.edges if edge[0] == i_from]))
def get_incoming(self, i_to):
return list(set([edge[0] for edge in self.edges if edge[1] == i_to]))
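if __name__ == '__main__':
    # Minimal demo (illustrative): both structures expose the same interface.
    for Graph in (DirectedAdjacencyMatrix, DirectedAdjacencyList):
        g = Graph(3)
        g.connect(0, 1)
        g.connect(1, 2)
        assert g.are_connected(0, 1)
        assert not g.are_connected(0, 2)
        assert g.get_outgoing(1) == [2]
        assert g.get_incoming(1) == [0]
        g.disconnect(0, 1)
        assert not g.are_connected(0, 1)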
| mit |
Elder-of-Ozone/i3pystatus | i3pystatus/network.py | 6 | 14457 | import netifaces
from i3pystatus import IntervalModule
from i3pystatus.core.color import ColorRangeModule
from i3pystatus.core.util import make_graph, round_dict, make_bar
def count_bits(integer):
    # Kernighan's trick: n &= n - 1 clears the lowest set bit, so the loop
    # runs once per set bit.
    bits = 0
    while integer:
        integer &= integer - 1
        bits += 1
    return bits
def v6_to_int(v6):
return int(v6.replace(":", ""), 16)
def prefix6(mask):
return count_bits(v6_to_int(mask))
def cidr6(addr, mask):
return "{addr}/{bits}".format(addr=addr, bits=prefix6(mask))
def v4_to_int(v4):
sum = 0
mul = 1
for part in reversed(v4.split(".")):
sum += int(part) * mul
mul *= 2 ** 8
return sum
def prefix4(mask):
return count_bits(v4_to_int(mask))
def cidr4(addr, mask):
return "{addr}/{bits}".format(addr=addr, bits=prefix4(mask))
def get_bonded_slaves():
try:
with open("/sys/class/net/bonding_masters") as f:
masters = f.read().split()
except FileNotFoundError:
return {}
slaves = {}
for master in masters:
with open("/sys/class/net/{}/bonding/slaves".format(master)) as f:
for slave in f.read().split():
slaves[slave] = master
return slaves
def sysfs_interface_up(interface, unknown_up=False):
try:
with open("/sys/class/net/{}/operstate".format(interface)) as f:
status = f.read().strip()
except FileNotFoundError:
# Interface doesn't exist
return False
return status == "up" or unknown_up and status == "unknown"
class NetworkInfo():
"""
Retrieve network information.
"""
def __init__(self, interface, ignore_interfaces, detached_down, unknown_up, get_wifi_info=False):
if interface not in netifaces.interfaces() and not detached_down:
raise RuntimeError(
"Unknown interface {iface}!".format(iface=interface))
self.ignore_interfaces = ignore_interfaces
self.detached_down = detached_down
self.unknown_up = unknown_up
self.get_wifi_info = get_wifi_info
def get_info(self, interface):
format_dict = dict(v4="", v4mask="", v4cidr="", v6="", v6mask="", v6cidr="")
iface_up = sysfs_interface_up(interface, self.unknown_up)
if not iface_up:
return format_dict
network_info = netifaces.ifaddresses(interface)
slaves = get_bonded_slaves()
try:
master = slaves[interface]
except KeyError:
pass
else:
if sysfs_interface_up(interface, self.unknown_up):
master_info = netifaces.ifaddresses(master)
for af in (netifaces.AF_INET, netifaces.AF_INET6):
try:
network_info[af] = master_info[af]
except KeyError:
pass
try:
mac = network_info[netifaces.AF_PACKET][0]["addr"]
except KeyError:
mac = "NONE"
format_dict['mac'] = mac
if iface_up:
format_dict.update(self.extract_network_info(network_info))
format_dict.update(self.extract_wireless_info(interface))
return format_dict
@staticmethod
def extract_network_info(network_info):
info = dict()
if netifaces.AF_INET in network_info:
v4 = network_info[netifaces.AF_INET][0]
info["v4"] = v4["addr"]
info["v4mask"] = v4["netmask"]
info["v4cidr"] = cidr4(v4["addr"], v4["netmask"])
if netifaces.AF_INET6 in network_info:
for v6 in network_info[netifaces.AF_INET6]:
info["v6"] = v6["addr"]
info["v6mask"] = v6["netmask"]
info["v6cidr"] = cidr6(v6["addr"], v6["netmask"])
if not v6["addr"].startswith("fe80::"): # prefer non link-local addresses
break
return info
def extract_wireless_info(self, interface):
info = dict(essid="", freq="", quality=0.0, quality_bar="")
# Just return empty values if we're not using any Wifi functionality
if not self.get_wifi_info:
return info
import basiciw
try:
iwi = basiciw.iwinfo(interface)
except Exception:
# Not a wireless interface
return info
info["essid"] = iwi["essid"]
info["freq"] = iwi["freq"]
quality = iwi["quality"]
if quality["quality_max"] > 0:
info["quality"] = quality["quality"] / quality["quality_max"]
else:
info["quality"] = quality["quality"]
info["quality"] *= 100
info["quality_bar"] = make_bar(info["quality"])
info["quality"] = round(info["quality"])
return info
class NetworkTraffic():
"""
Retrieve network traffic information
"""
pnic = None
pnic_before = None
def __init__(self, unknown_up, divisor, round_size):
self.unknown_up = unknown_up
self.divisor = divisor
self.round_size = round_size
def update_counters(self, interface):
import psutil
self.pnic_before = self.pnic
counters = psutil.net_io_counters(pernic=True)
self.pnic = counters[interface] if interface in counters else None
def clear_counters(self):
self.pnic_before = None
self.pnic = None
def get_bytes_sent(self):
return (self.pnic.bytes_sent - self.pnic_before.bytes_sent) / self.divisor
def get_bytes_received(self):
return (self.pnic.bytes_recv - self.pnic_before.bytes_recv) / self.divisor
def get_packets_sent(self):
return self.pnic.packets_sent - self.pnic_before.packets_sent
def get_packets_received(self):
return self.pnic.packets_recv - self.pnic_before.packets_recv
def get_rx_tot_Mbytes(self, interface):
try:
with open("/sys/class/net/{}/statistics/rx_bytes".format(interface)) as f:
return int(f.readline().split('\n')[0]) / (1024 * 1024)
except FileNotFoundError:
return False
def get_tx_tot_Mbytes(self, interface):
try:
with open("/sys/class/net/{}/statistics/tx_bytes".format(interface)) as f:
return int(f.readline().split('\n')[0]) / (1024 * 1024)
except FileNotFoundError:
return False
def get_usage(self, interface):
self.update_counters(interface)
usage = dict(bytes_sent=0, bytes_recv=0, packets_sent=0, packets_recv=0)
if not sysfs_interface_up(interface, self.unknown_up) or not self.pnic_before:
return usage
else:
usage["bytes_sent"] = self.get_bytes_sent()
usage["bytes_recv"] = self.get_bytes_received()
usage["packets_sent"] = self.get_packets_sent()
usage["packets_recv"] = self.get_packets_received()
usage["rx_tot_Mbytes"] = self.get_rx_tot_Mbytes(interface)
usage["tx_tot_Mbytes"] = self.get_tx_tot_Mbytes(interface)
round_dict(usage, self.round_size)
return usage
class Network(IntervalModule, ColorRangeModule):
"""
Displays network information for an interface.
Requires the PyPI packages `colour`, `netifaces`, `psutil` (optional, see below)
and `basiciw` (optional, see below).
.. rubric:: Available formatters
Network Information Formatters:
* `{interface}` — same as setting
* `{v4}` — IPv4 address
* `{v4mask}` — subnet mask
* `{v4cidr}` — IPv4 address in cidr notation (i.e. 192.168.2.204/24)
* `{v6}` — IPv6 address
* `{v6mask}` — subnet mask
* `{v6cidr}` — IPv6 address in cidr notation
* `{mac}` — MAC of interface
Wireless Information Formatters (requires PyPI package `basiciw`):
* `{essid}` — ESSID of currently connected wifi
* `{freq}` — Current frequency
* `{quality}` — Link quality in percent
    * `{quality_bar}` — Bar graphically representing link quality
    Network Traffic Formatters (requires PyPI package `psutil`):
    * `{interface}` — the configured network interface
    * `{kbs}` — Float representing kB/s
    * `{network_graph}` — Unicode graph representing network usage
    * `{bytes_sent}` — bytes sent per second (divided by divisor)
    * `{bytes_recv}` — bytes received per second (divided by divisor)
    * `{packets_sent}` — packets sent per second
    * `{packets_recv}` — packets received per second
* `{rx_tot_Mbytes}` — total Mbytes received
* `{tx_tot_Mbytes}` — total Mbytes sent
"""
settings = (
("format_up", "format string"),
("format_down", "format string"),
"color_up",
"color_down",
("interface", "Interface to watch, eg 'eth0'"),
("dynamic_color", "Set color dynamically based on network traffic. Note: this overrides color_up"),
("start_color", "Hex or English name for start of color range, eg '#00FF00' or 'green'"),
("end_color", "Hex or English name for end of color range, eg '#FF0000' or 'red'"),
("graph_width", "Width of the network traffic graph"),
("graph_style", "Graph style ('blocks', 'braille-fill', 'braille-peak', or 'braille-snake')"),
("upper_limit",
"Expected max kb/s. This value controls how the network traffic graph is drawn and in what color"),
("graph_type", "Whether to draw the network traffic graph for input or output. "
"Allowed values 'input' or 'output'"),
("divisor", "divide all byte values by this value"),
("ignore_interfaces", "Array of interfaces to ignore when cycling through "
"on click, eg, ['lo']"),
("round_size", "defines number of digits in round"),
("detached_down", "If the interface doesn't exist, display it as if it were down"),
("unknown_up", "If the interface is in unknown state, display it as if it were up"),
)
interval = 1
interface = 'eth0'
format_up = "{interface} {network_graph}{kbs}KB/s"
format_down = "{interface}: DOWN"
color_up = "#00FF00"
color_down = "#FF0000"
dynamic_color = True
graph_type = 'input'
graph_width = 15
graph_style = 'blocks'
upper_limit = 150.0
# Network traffic settings
divisor = 1024
round_size = None
# Network info settings
detached_down = True
unknown_up = False
ignore_interfaces = ["lo"]
on_leftclick = "nm-connection-editor"
on_rightclick = "cycle_interface"
on_upscroll = ['cycle_interface', 1]
on_downscroll = ['cycle_interface', -1]
def init(self):
# Don't require importing basiciw unless using the functionality it offers.
        if any(s in self.format_up or s in self.format_down for s in
               ['essid', 'freq', 'quality', 'quality_bar']):
get_wifi_info = True
else:
get_wifi_info = False
self.network_info = NetworkInfo(self.interface, self.ignore_interfaces, self.detached_down, self.unknown_up,
get_wifi_info)
# Don't require importing psutil unless using the functionality it offers.
if any(s in self.format_up or s in self.format_down for s in
['bytes_sent', 'bytes_recv', 'packets_sent', 'packets_recv', 'network_graph',
'rx_tot_Mbytes', 'tx_tot_Mbytes', 'kbs']):
self.network_traffic = NetworkTraffic(self.unknown_up, self.divisor, self.round_size)
else:
self.network_traffic = None
if not self.dynamic_color:
self.end_color = self.start_color
self.colors = self.get_hex_color_range(self.start_color, self.end_color, int(self.upper_limit))
self.kbs_arr = [0.0] * self.graph_width
def cycle_interface(self, increment=1):
"""Cycle through available interfaces in `increment` steps. Sign indicates direction."""
interfaces = [i for i in netifaces.interfaces() if i not in self.ignore_interfaces]
if self.interface in interfaces:
next_index = (interfaces.index(self.interface) + increment) % len(interfaces)
self.interface = interfaces[next_index]
elif len(interfaces) > 0:
self.interface = interfaces[0]
if self.network_traffic:
self.network_traffic.clear_counters()
self.kbs_arr = [0.0] * self.graph_width
def get_network_graph(self, kbs):
# Cycle array by inserting at the start and chopping off the last element
self.kbs_arr.insert(0, kbs)
self.kbs_arr = self.kbs_arr[:self.graph_width]
return make_graph(self.kbs_arr, 0.0, self.upper_limit, self.graph_style)
def run(self):
format_values = dict(kbs="", network_graph="", bytes_sent="", bytes_recv="", packets_sent="", packets_recv="",
rx_tot_Mbytes="", tx_tot_Mbytes="",
interface="", v4="", v4mask="", v4cidr="", v6="", v6mask="", v6cidr="", mac="",
essid="", freq="", quality="", quality_bar="")
if self.network_traffic:
network_usage = self.network_traffic.get_usage(self.interface)
format_values.update(network_usage)
if self.graph_type == 'input':
kbs = network_usage['bytes_recv']
elif self.graph_type == 'output':
kbs = network_usage['bytes_sent']
else:
raise Exception("graph_type must be either 'input' or 'output'!")
format_values['network_graph'] = self.get_network_graph(kbs)
format_values['kbs'] = "{0:.1f}".format(round(kbs, 2)).rjust(6)
color = self.get_gradient(kbs, self.colors, self.upper_limit)
else:
color = None
if sysfs_interface_up(self.interface, self.unknown_up):
if not color:
color = self.color_up
format_str = self.format_up
else:
color = self.color_down
format_str = self.format_down
network_info = self.network_info.get_info(self.interface)
format_values.update(network_info)
format_values['interface'] = self.interface
self.output = {
"full_text": format_str.format(**format_values),
'color': color,
}
| mit |
ZacariasBendeck/youtube-dl | youtube_dl/extractor/ro220.py | 176 | 1451 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class Ro220IE(InfoExtractor):
IE_NAME = '220.ro'
_VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
'md5': '03af18b73a07b4088753930db7a34add',
'info_dict': {
'id': 'LYV6doKo7f',
'ext': 'mp4',
'title': 'Luati-le Banii sez 4 ep 1',
'description': 're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
url = compat_urllib_parse_unquote(self._search_regex(
r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url'))
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
formats = [{
'format_id': 'sd',
'url': url,
'ext': 'mp4',
}]
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
| unlicense |
mahadeva604/ansible-modules-extras | windows/win_dotnet_ngen.py | 5 | 1778 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Peter Mounce <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_dotnet_ngen
version_added: "2.0"
short_description: Runs ngen to recompile DLLs after .NET updates
description:
- After .NET framework is installed/updated, Windows will probably want to recompile things to optimise for the host.
- This happens via scheduled task, usually at some inopportune time.
- This module allows you to run this task on your own schedule, so you incur the CPU hit at some more convenient and controlled time.
- "http://blogs.msdn.com/b/dotnet/archive/2013/08/06/wondering-why-mscorsvw-exe-has-high-cpu-usage-you-can-speed-it-up.aspx"
notes:
- there are in fact two scheduled tasks for ngen but they have no triggers so aren't a problem
- there's no way to test if they've been completed (?)
- the stdout is quite likely to be several megabytes
options:
author: Peter Mounce
'''
EXAMPLES = '''
# Run ngen tasks
- win_dotnet_ngen:
'''
| gpl-3.0 |
ChinaMassClouds/copenstack-server | openstack/src/horizon-2014.2/openstack_dashboard/dashboards/project/overview/panel.py | 1 | 1101 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.project import dashboard
class Overview(horizon.Panel):
name = _("Overview")
slug = 'overview'
img = '/static/dashboard/img/nav/overview1.png'
dashboard.Project.register(Overview)
| gpl-2.0 |
NickDaly/GemRB-FixConfig-Branch | gemrb/GUIScripts/iwd/GUIWORLD.py | 2 | 12851 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# GUIW.py - scripts to control some windows from GUIWORLD winpack
# except for the Actions, Portrait, Options and Dialog windows
#################################################################
import GemRB
from GUIDefines import *
from ie_restype import *
import GUICommon
import GUICommonWindows
import GUIClasses
FRAME_PC_SELECTED = 0
FRAME_PC_TARGET = 1
ContainerWindow = None
ContinueWindow = None
ReformPartyWindow = None
OldActionsWindow = None
OldMessageWindow = None
Container = None
def CloseContinueWindow ():
if ContinueWindow:
# don't close the actual window now to avoid flickering: we might still want it open
GemRB.SetVar ("DialogChoose", GemRB.GetVar ("DialogOption"))
def NextDialogState ():
global ContinueWindow, OldActionsWindow
if ContinueWindow == None:
return
hideflag = GemRB.HideGUI ()
if ContinueWindow:
ContinueWindow.Unload ()
GemRB.SetVar ("ActionsWindow", OldActionsWindow.ID)
ContinueWindow = None
OldActionsWindow = None
if hideflag:
GemRB.UnhideGUI ()
def OpenEndMessageWindow ():
global ContinueWindow, OldActionsWindow
hideflag = GemRB.HideGUI ()
if not ContinueWindow:
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
ContinueWindow = Window = GemRB.LoadWindow (9)
OldActionsWindow = GUIClasses.GWindow( GemRB.GetVar ("ActionsWindow") )
GemRB.SetVar ("ActionsWindow", Window.ID)
#end dialog
Button = ContinueWindow.GetControl (0)
Button.SetText (9371)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, CloseContinueWindow)
if GUICommonWindows.PortraitWindow:
GUICommonWindows.UpdatePortraitWindow ()
if hideflag:
GemRB.UnhideGUI ()
def OpenContinueMessageWindow ():
global ContinueWindow, OldActionsWindow
hideflag = GemRB.HideGUI ()
if not ContinueWindow:
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
ContinueWindow = Window = GemRB.LoadWindow (9)
OldActionsWindow = GUIClasses.GWindow( GemRB.GetVar ("ActionsWindow") )
GemRB.SetVar ("ActionsWindow", Window.ID)
#continue
Button = ContinueWindow.GetControl (0)
Button.SetText (9372)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, CloseContinueWindow)
if hideflag:
GemRB.UnhideGUI ()
def CloseContainerWindow ():
global OldActionsWindow, OldMessageWindow, ContainerWindow
if ContainerWindow == None:
return
hideflag = GemRB.HideGUI ()
if ContainerWindow:
ContainerWindow.Unload ()
ContainerWindow = None
GemRB.SetVar ("ActionsWindow", OldActionsWindow.ID)
GemRB.SetVar ("MessageWindow", OldMessageWindow.ID)
Table = GemRB.LoadTable ("containr")
row = Container['Type']
tmp = Table.GetValue (row, 2)
#play closing sound if applicable
if tmp!='*':
GemRB.PlaySound (tmp)
#it is enough to close here
if hideflag:
GemRB.UnhideGUI ()
def UpdateContainerWindow ():
global Container
Window = ContainerWindow
pc = GemRB.GameGetFirstSelectedPC ()
GUICommon.SetEncumbranceLabels( Window, 0x10000043, 0x10000044, pc)
party_gold = GemRB.GameGetPartyGold ()
Text = Window.GetControl (0x10000036)
Text.SetText (str (party_gold))
Container = GemRB.GetContainer(0) #will use first selected pc anyway
LeftCount = Container['ItemCount']
ScrollBar = Window.GetControl (52)
Count = LeftCount/3
if Count<1:
Count=1
ScrollBar.SetVarAssoc ("LeftTopIndex", Count)
inventory_slots = GemRB.GetSlots (pc, 0x8000)
RightCount = len(inventory_slots)
ScrollBar = Window.GetControl (53)
Count = RightCount/2
if Count<1:
Count=1
ScrollBar.SetVarAssoc ("RightTopIndex", Count)
RedrawContainerWindow ()
def RedrawContainerWindow ():
Window = ContainerWindow
LeftTopIndex = GemRB.GetVar ("LeftTopIndex") * 3
LeftIndex = GemRB.GetVar ("LeftIndex")
RightTopIndex = GemRB.GetVar ("RightTopIndex") * 2
RightIndex = GemRB.GetVar ("RightIndex")
LeftCount = Container['ItemCount']
pc = GemRB.GameGetFirstSelectedPC ()
inventory_slots = GemRB.GetSlots (pc, 0x8000)
RightCount = len(inventory_slots)
for i in range (6):
#this is an autoselected container, but we could use PC too
Slot = GemRB.GetContainerItem (0, i+LeftTopIndex)
Button = Window.GetControl (i)
if Slot != None:
Button.SetVarAssoc ("LeftIndex", LeftTopIndex+i)
else:
Button.SetVarAssoc ("LeftIndex", -1)
GUICommon.UpdateInventorySlot (pc, Button, Slot, "container")
for i in range (4):
if i+RightTopIndex<RightCount:
Slot = GemRB.GetSlotItem (pc, inventory_slots[i+RightTopIndex])
else:
Slot = None
Button = Window.GetControl (i+10)
if Slot!=None:
Button.SetVarAssoc ("RightIndex", RightTopIndex+i)
else:
Button.SetVarAssoc ("RightIndex", -1)
GUICommon.UpdateInventorySlot (pc, Button, Slot, "inventory")
def OpenContainerWindow ():
global OldActionsWindow, OldMessageWindow
global ContainerWindow, Container
if ContainerWindow:
return
hideflag = GemRB.HideGUI ()
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
ContainerWindow = Window = GemRB.LoadWindow (8)
OldActionsWindow = GUIClasses.GWindow( GemRB.GetVar ("ActionsWindow") )
OldMessageWindow = GUIClasses.GWindow( GemRB.GetVar ("MessageWindow") )
GemRB.SetVar ("ActionsWindow", Window.ID)
GemRB.SetVar ("MessageWindow", -1)
Container = GemRB.GetContainer(0)
# 0 - 5 - Ground Item
# 10 - 13 - Personal Item
# 50 hand
# 52, 53 scroller ground, scroller personal
# 54 - encumbrance
for i in range (6):
Button = Window.GetControl (i)
Button.SetVarAssoc ("LeftIndex", i)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, TakeItemContainer)
for i in range (4):
Button = Window.GetControl (i+10)
Button.SetVarAssoc ("RightIndex", i)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, DropItemContainer)
# left scrollbar
ScrollBar = Window.GetControl (52)
ScrollBar.SetEvent (IE_GUI_SCROLLBAR_ON_CHANGE, RedrawContainerWindow)
# right scrollbar
ScrollBar = Window.GetControl (53)
ScrollBar.SetEvent (IE_GUI_SCROLLBAR_ON_CHANGE, RedrawContainerWindow)
Button = Window.GetControl (54)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button.CreateLabelOnButton (0x10000043, "NUMBER", IE_FONT_ALIGN_LEFT|IE_FONT_ALIGN_TOP)
Button.CreateLabelOnButton (0x10000044, "NUMBER", IE_FONT_ALIGN_RIGHT|IE_FONT_ALIGN_BOTTOM)
Button = Window.GetControl (50)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Table = GemRB.LoadTable ("containr")
row = Container['Type']
tmp = Table.GetValue (row, 0)
if tmp!='*':
GemRB.PlaySound (tmp)
tmp = Table.GetValue (row, 1)
if tmp!='*':
Button.SetSprites (tmp, 0, 0, 0, 0, 0 )
# Done
Button = Window.GetControl (51)
#no text
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, LeaveContainer)
GemRB.SetVar ("LeftTopIndex", 0)
GemRB.SetVar ("RightTopIndex", 0)
UpdateContainerWindow ()
if hideflag:
GemRB.UnhideGUI ()
#doing this way it will inform the core system too, which in turn will call
#CloseContainerWindow ()
def LeaveContainer ():
GemRB.LeaveContainer()
def DropItemContainer ():
RightIndex = GemRB.GetVar ("RightIndex")
if RightIndex<0:
return
#we need to get the right slot number
pc = GemRB.GameGetFirstSelectedPC ()
inventory_slots = GemRB.GetSlots (pc, 0x8000)
if RightIndex >= len(inventory_slots):
return
GemRB.ChangeContainerItem (0, inventory_slots[RightIndex], 0)
UpdateContainerWindow ()
def TakeItemContainer ():
LeftIndex = GemRB.GetVar ("LeftIndex")
if LeftIndex<0:
return
if LeftIndex >= Container['ItemCount']:
return
GemRB.ChangeContainerItem (0, LeftIndex, 1)
UpdateContainerWindow ()
def UpdateReformWindow ():
Window = ReformPartyWindow
select = GemRB.GetVar ("Selected")
need_to_drop = GemRB.GetPartySize ()-PARTY_SIZE
if need_to_drop<0:
need_to_drop = 0
#excess player number
Label = Window.GetControl (0x1000000f)
Label.SetText (str(need_to_drop) )
#done
Button = Window.GetControl (8)
if need_to_drop:
Button.SetState (IE_GUI_BUTTON_DISABLED)
else:
Button.SetState (IE_GUI_BUTTON_ENABLED)
#remove
Button = Window.GetControl (15)
if select:
Button.SetState (IE_GUI_BUTTON_ENABLED)
else:
Button.SetState (IE_GUI_BUTTON_DISABLED)
for i in range (PARTY_SIZE+1):
Button = Window.GetControl (i)
Button.EnableBorder (FRAME_PC_SELECTED, select == i+2 )
#+2 because protagonist is skipped
pic = GemRB.GetPlayerPortrait (i+2,1)
if not pic:
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetState (IE_GUI_BUTTON_LOCKED)
continue
Button.SetState (IE_GUI_BUTTON_ENABLED)
Button.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_ALIGN_BOTTOM|IE_GUI_BUTTON_ALIGN_LEFT, OP_SET)
Button.SetPicture (pic, "NOPORTSM")
GUICommonWindows.UpdatePortraitWindow ()
return
def RemovePlayer ():
global ReformPartyWindow
hideflag = GemRB.HideGUI ()
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
if ReformPartyWindow:
ReformPartyWindow.Unload ()
ReformPartyWindow = Window = GemRB.LoadWindow (25)
GemRB.SetVar ("OtherWindow", Window.ID)
#are you sure
Label = Window.GetControl (0x0fffffff)
Label.SetText (17518)
#confirm
Button = Window.GetControl (1)
Button.SetText (17507)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, RemovePlayerConfirm)
Button.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
#cancel
Button = Window.GetControl (2)
Button.SetText (13727)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, RemovePlayerCancel)
Button.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
def RemovePlayerConfirm ():
global ReformPartyWindow
hideflag = GemRB.HideGUI ()
if ReformPartyWindow:
ReformPartyWindow.Unload ()
GemRB.SetVar ("OtherWindow", -1)
#removing selected player
ReformPartyWindow = None
if hideflag:
GemRB.UnhideGUI ()
GemRB.LeaveParty (GemRB.GetVar("Selected") )
OpenReformPartyWindow ()
return
def RemovePlayerCancel ():
global ReformPartyWindow
hideflag = GemRB.HideGUI ()
if ReformPartyWindow:
ReformPartyWindow.Unload ()
GemRB.SetVar ("OtherWindow", -1)
ReformPartyWindow = None
if hideflag:
GemRB.UnhideGUI ()
OpenReformPartyWindow ()
return
def OpenReformPartyWindow ():
global ReformPartyWindow
GemRB.SetVar ("Selected", 0)
hideflag = GemRB.HideGUI ()
if ReformPartyWindow:
if ReformPartyWindow:
ReformPartyWindow.Unload ()
ReformPartyWindow = None
GemRB.SetVar ("OtherWindow", -1)
#GemRB.LoadWindowPack ("GUIREC")
if hideflag:
GemRB.UnhideGUI ()
#re-enabling party size control
GemRB.GameSetPartySize (PARTY_SIZE)
return
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
ReformPartyWindow = Window = GemRB.LoadWindow (24)
GemRB.SetVar ("OtherWindow", Window.ID)
#PC portraits
for j in range (PARTY_SIZE+1):
Button = Window.GetControl (j)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button.SetFlags (IE_GUI_BUTTON_RADIOBUTTON|IE_GUI_BUTTON_NO_IMAGE|IE_GUI_BUTTON_PICTURE,OP_SET)
Button.SetBorder (FRAME_PC_SELECTED, 1, 1, 2, 2, 0, 255, 0, 255)
#protagonist is skipped
index = j + 2
Button.SetVarAssoc ("Selected", index)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, UpdateReformWindow)
# Remove
Button = Window.GetControl (15)
Button.SetText (17507)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, RemovePlayer)
# Done
Button = Window.GetControl (8)
Button.SetText (11973)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenReformPartyWindow)
GemRB.SetVar ("ActionsWindow", -1)
UpdateReformWindow ()
if hideflag:
GemRB.UnhideGUI ()
Window.ShowModal (MODAL_SHADOW_GRAY)
return
def DeathWindow ():
#no death movie, but music is changed
GemRB.LoadMusicPL ("Theme.mus",1)
GemRB.HideGUI ()
GemRB.SetTimedEvent (DeathWindowEnd, 10)
return
def DeathWindowEnd ():
GemRB.GamePause (1,1)
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
Window = GemRB.LoadWindow (17)
#reason for death
Label = Window.GetControl (0x0fffffff)
Label.SetText (16498)
#load
Button = Window.GetControl (1)
Button.SetText (15590)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, LoadPress)
#quit
Button = Window.GetControl (2)
Button.SetText (15417)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, QuitPress)
GemRB.HideGUI ()
GemRB.SetVar ("MessageWindow", -1)
GemRB.UnhideGUI ()
Window.ShowModal (MODAL_SHADOW_GRAY)
return
def QuitPress():
GemRB.QuitGame ()
GemRB.SetNextScript ("Start")
return
def LoadPress():
GemRB.QuitGame ()
GemRB.SetNextScript ("GUILOAD")
return
| gpl-2.0 |
jjo31/ATHAM-Fluidity | tests/turbine_flux_dg_2d/mesh/scripts/triangle_add_edgeowner.py | 20 | 4796 | #!/usr/bin/env python
import sys
import triangle
import copy
import numpy
from sets import Set
#input surface_id, filename
# 5.5.2010: this script adds a new attribute to the .edge file which holds the "owner" element number of this edge
# Here is an example geo file for this script:
# Point(1) = {0, 0, 0, 2};
# Point(2) = {1, 0, 0, 2};
# Point(3) = {1, 1, 0, 2};
# Point(4) = {0, 1, 0, 2};
# Point(5) = {0.5, 0, 0, 2};
# Point(6) = {0.5, 1, 0, 2};
# Point(7) = {0.500001, 0, 0, 2};
# Point(8) = {0.500001, 1, 0, 2};
# Point(9) = {0.4, -0.1, 0, 2};
# Point(10) = {0.4, 1.1, 0, 2};
#
#
# Line(1) = {4, 1};
# Line(2) = {1, 9};
# Line(3) = {9, 5};
# Line(4) = {5, 6};
# Line(9) = {6, 10};
# Line(10) = {10, 4};
#
# Line(5) = {8, 7};
# Line(6) = {7, 2};
# Line(7) = {2, 3};
# Line(8) = {3, 8};
#
# Physical Line(20) = {1};
# Physical Line(21) = {2};
# Physical Line(22) = {3};
# Physical Line(23) = {4};
# Physical Line(28) = {9};
# Physical Line(29) = {10};
#
# Physical Line(24) = {5};
# Physical Line(25) = {6};
# Physical Line(26) = {7};
# Physical Line(27) = {8};
#
# Line Loop(10) = {4, 9, 10, 1, 2, 3};
# Line Loop(11) = {8, 5, 6, 7};
#
# Plane Surface(11) = {10};
# Plane Surface(12) = {11};
# Physical Surface(12) = {11, 12};
########################################################################################################
def nodedupl_recursion(elein, edgein, nodeid, boundary_id):
global copy_eles, copy_edges, copy_nodes, debug, copy_surface_ids, copy_surface_id, copy_surfaceowner_ids, copy_region_ids
next_edgein=triangle.get_partner_edge(edgein, nodeid, boundary_id)
if next_edgein==None:
print "Reached one end of the surface boundary."
return
if debug>1:
print "Lets loop around nodeid", nodeid, " starting with ele", elein+1, " with boundary edge ", edgein+1, " until we reach the next surface edge with id ", next_edgein+1
next_elein_list=triangle.get_eles_on_ele_side(elein, nodeid, edgein, boundary_id)
if debug>1:
print "Duplicate edge ", next_edgein +1
copy_edges.append(triangle.edges[next_edgein])
copy_surface_ids.append(new_surface_id)
copy_surfaceowner_ids.append(next_elein_list[len(next_elein_list)-1]+1) # update copy_surfaceowner_ids for the new edge
# update copy_surfaceowner_ids for the old edge
if triangle.ele_with_edgeids(next_edgein)[0]==next_elein_list[len(next_elein_list)-1]:
copy_surfaceowner_ids[next_edgein]=triangle.ele_with_edgeids(next_edgein)[1]+1
else:
copy_surfaceowner_ids[next_edgein]=triangle.ele_with_edgeids(next_edgein)[0]+1
if (triangle.edges[next_edgein][0]==nodeid):
next_nodeid=triangle.edges[next_edgein][1]
else:
next_nodeid=triangle.edges[next_edgein][0]
nodedupl_recursion(next_elein_list[len(next_elein_list)-1], next_edgein, next_nodeid, boundary_id)
########################################################################################################
if not len(sys.argv)==2:
    print "Usage: triangle_add_edgeowner.py file"
    print ""
    print "Outputs fixed .edge, .ele and .node files with a new edge attribute holding the owning element of each edge."
    print ""
    print "The output files will have the suffix _edgow"
    exit()
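# Example invocation (assuming a Triangle mesh basename "mesh" with
# mesh.node, mesh.ele and mesh.edge present):
#   python triangle_add_edgeowner.py mesh
# which writes mesh_edgow.node, mesh_edgow.ele and mesh_edgow.edge.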
filename=sys.argv[1]
debug=2
triangle.read_nodefile(filename+'.node')
if triangle.dim!=2:
print "Only 2 dim meshes supported so far"
triangle.read_edgefile(filename+'.edge')
triangle.read_elefile(filename+'.ele')
copy_eles=copy.deepcopy(triangle.eles)
copy_region_ids=copy.deepcopy(triangle.region_ids)
copy_edges=copy.deepcopy(triangle.edges)
copy_surface_ids=copy.deepcopy(triangle.surface_ids)
copy_surfaceowner_ids=[-1 for i in range(0,len(triangle.surface_ids))] # Will store the elemed id for each surface edge
copy_nodes=copy.deepcopy(triangle.nodes)
# Now assign the surfaceowner_id to the external boundaries
for e in range(0,len(copy_surfaceowner_ids)):
if copy_surfaceowner_ids[e]>=0:
print "Internal Error. Ask [email protected]!"
exit()
if len(triangle.ele_with_edgeids(e))!=1:
print "Error Found internal boundary!"
exit()
copy_surfaceowner_ids[e]=triangle.ele_with_edgeids(e)[0]+1
if debug>0:
print "save node file as ", filename, "_edgow.node"
triangle.save_nodefile(copy_nodes, 2, filename+"_edgow.node")
if debug>0:
print "save ele file as ", filename, "_edgow.ele"
triangle.save_elefile(copy_eles, copy_region_ids, filename+"_edgow.ele")
if debug>0:
print "save edge file as ", filename, "_edgow.edge"
triangle.save_edgefile2(copy_edges, copy_surface_ids, copy_surfaceowner_ids, filename+"_edgow.edge")
| lgpl-2.1 |
jsma/django-cms | menus/utils.py | 11 | 4500 | # -*- coding: utf-8 -*-
from cms.models.titlemodels import Title
from cms.utils import get_language_from_request
from cms.utils.i18n import force_language, hide_untranslated
from django.conf import settings
from django.core.urlresolvers import NoReverseMatch, reverse, resolve
def mark_descendants(nodes):
for node in nodes:
node.descendant = True
mark_descendants(node.children)
def cut_levels(nodes, level):
"""
For cutting the nav_extender levels if you have a from_level in the navigation.
"""
if nodes:
if nodes[0].level == level:
return nodes
return sum((cut_levels(node.children, level) for node in nodes), [])
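# Worked example (illustrative): for a tree rooted at level 0, calling
# cut_levels(nodes, 1) returns the flat list of all level-1 nodes across
# every branch, which is what a navigation with from_level=1 renders.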
def find_selected(nodes):
"""
Finds a selected nav_extender node
"""
for node in nodes:
if hasattr(node, "selected"):
return node
elif hasattr(node, "ancestor"):
result = find_selected(node.children)
if result:
return result
def set_language_changer(request, func):
"""
Sets a language chooser function that accepts one parameter: language
The function should return a url in the supplied language
normally you would want to give it the get_absolute_url function with an optional language parameter
example:
def get_absolute_url(self, language=None):
reverse('product_view', args=[self.get_slug(language=language)])
Use this function in your nav extender views that have i18n slugs.
"""
request._language_changer = func
def language_changer_decorator(language_changer):
"""
A decorator wrapper for set_language_changer.
from menus.utils import language_changer_decorator
@language_changer_decorator(function_get_language_changer_url)
def my_view_function(request, somearg):
pass
"""
def _decorator(func):
def _wrapped(request, *args, **kwargs):
set_language_changer(request, language_changer)
return func(request, *args, **kwargs)
_wrapped.__name__ = func.__name__
_wrapped.__doc__ = func.__doc__
return _wrapped
return _decorator
class DefaultLanguageChanger(object):
def __init__(self, request):
self.request = request
self._app_path = None
@property
def app_path(self):
if self._app_path is None:
if settings.USE_I18N:
page_path = self.get_page_path(get_language_from_request(self.request))
else:
page_path = self.get_page_path(settings.LANGUAGE_CODE)
if page_path:
self._app_path = self.request.path_info[len(page_path):]
else:
self._app_path = self.request.path_info
return self._app_path
def get_page_path(self, lang):
page = getattr(self.request, 'current_page', None)
if page:
with force_language(lang):
try:
return page.get_absolute_url(language=lang, fallback=False)
except (Title.DoesNotExist, NoReverseMatch):
if hide_untranslated(lang) and settings.USE_I18N:
return '/%s/' % lang
else:
return page.get_absolute_url(language=lang, fallback=True)
else:
return '/%s/' % lang if settings.USE_I18N else '/'
def __call__(self, lang):
page_language = get_language_from_request(self.request)
with force_language(page_language):
try:
view = resolve(self.request.path_info)
except:
view = None
if hasattr(self.request, 'toolbar') and self.request.toolbar.obj:
with force_language(lang):
try:
return self.request.toolbar.obj.get_absolute_url()
except:
pass
elif view and not view.url_name in ('pages-details-by-slug', 'pages-root'):
view_name = view.url_name
if view.namespace:
view_name = "%s:%s" % (view.namespace, view_name)
url = None
with force_language(lang):
try:
url = reverse(view_name, args=view.args, kwargs=view.kwargs, current_app=view.app_name)
except NoReverseMatch:
pass
if url:
return url
return '%s%s' % (self.get_page_path(lang), self.app_path)
| bsd-3-clause |
Y3K/django | tests/multiple_database/routers.py | 379 | 1927 | from __future__ import unicode_literals
from django.db import DEFAULT_DB_ALIAS
class TestRouter(object):
"""
Vaguely behave like primary/replica, but the databases aren't assumed to
propagate changes.
"""
def db_for_read(self, model, instance=None, **hints):
if instance:
return instance._state.db or 'other'
return 'other'
def db_for_write(self, model, **hints):
return DEFAULT_DB_ALIAS
def allow_relation(self, obj1, obj2, **hints):
return obj1._state.db in ('default', 'other') and obj2._state.db in ('default', 'other')
def allow_migrate(self, db, app_label, **hints):
return True
class AuthRouter(object):
"""
Control all database operations on models in the contrib.auth application.
"""
def db_for_read(self, model, **hints):
"Point all read operations on auth models to 'default'"
if model._meta.app_label == 'auth':
# We use default here to ensure we can tell the difference
# between a read request and a write request for Auth objects
return 'default'
return None
def db_for_write(self, model, **hints):
"Point all operations on auth models to 'other'"
if model._meta.app_label == 'auth':
return 'other'
return None
def allow_relation(self, obj1, obj2, **hints):
"Allow any relation if a model in Auth is involved"
if obj1._meta.app_label == 'auth' or obj2._meta.app_label == 'auth':
return True
return None
def allow_migrate(self, db, app_label, **hints):
"Make sure the auth app only appears on the 'other' db"
if app_label == 'auth':
return db == 'other'
return None
class WriteRouter(object):
# A router that only expresses an opinion on writes
def db_for_write(self, model, **hints):
return 'writer'
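# To activate routers like these, list them in settings (illustrative paths):
#
#   DATABASE_ROUTERS = ['multiple_database.routers.AuthRouter',
#                       'multiple_database.routers.TestRouter']
#
# Routers are consulted in order, so AuthRouter claims auth models first and
# TestRouter handles the rest.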
| bsd-3-clause |
trustedanalytics/spark-tk | regression-tests/sparktkregtests/testcases/frames/boxcox_test.py | 12 | 5074 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test frame.box_cox() and frame.reverse_box_cox()"""
import unittest
from sparktkregtests.lib import sparktk_test
class BoxCoxTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(BoxCoxTest, self).setUp()
dataset =\
[[5.8813080107727425], [8.9771372790941797], [8.9153072947470804],
[8.1583747730768401], [0.35889585616853292]]
schema = [("y", float)]
self.frame = self.context.frame.create(dataset, schema=schema)
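        # For reference, the standard Box-Cox transform (not spark-tk
        # specific) is:
        #   y(lmbda) = (y**lmbda - 1) / lmbda   for lmbda != 0
        #   y(0)     = ln(y)
        # e.g. ln(5.8813080107727425) ~ 1.77178, matching test_wt_default.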
def test_wt_default(self):
""" Test behaviour for default params, lambda = 0 """
self.frame.box_cox("y")
actual = self.frame.to_pandas()["y_lambda_0.0"].tolist()
expected =\
[1.7717791879837133, 2.1946810429706676,
2.1877697201262163, 2.0990449791729704, -1.0247230268174008]
self.assertItemsEqual(actual, expected)
def test_lambda(self):
""" Test wt for lambda = 0.3 """
self.frame.box_cox("y", 0.3)
actual = self.frame.to_pandas()["y_lambda_0.3"].tolist()
expected =\
[2.3384668540844573, 3.1056915770236082,
3.0923547540771801, 2.9235756971904037, -0.88218677941017198]
self.assertItemsEqual(actual, expected)
def test_reverse_default(self):
""" Test reverse transform for default lambda = 0 """
self.frame.box_cox("y")
self.frame.reverse_box_cox("y_lambda_0.0",
reverse_box_cox_column_name="reverse")
actual = self.frame.to_pandas()["reverse"].tolist()
expected =\
[5.8813080107727425, 8.9771372790941815,
8.9153072947470804, 8.1583747730768401, 0.35889585616853298]
self.assertItemsEqual(actual, expected)
def test_reverse_lambda(self):
""" Test reverse transform for lambda = 0.3 """
self.frame.box_cox("y", 0.3)
self.frame.reverse_box_cox("y_lambda_0.3", 0.3,
reverse_box_cox_column_name="reverse")
actual = self.frame.to_pandas()["reverse"].tolist()
expected =\
[5.8813080107727442, 8.9771372790941797,
8.9153072947470822, 8.1583747730768419,
0.35889585616853298]
self.assertItemsEqual(actual, expected)
@unittest.skip("req not clear")
def test_lambda_negative(self):
""" Test box cox for lambda -1 """
self.frame.box_cox("y", -1)
actual = self.frame.to_pandas()["y_lambda_-1.0"].tolist()
expected =\
[0.82996979614597488, 0.88860591423406388,
0.88783336715839256, 0.87742656744575354,
-1.7863236167608822]
self.assertItemsEqual(actual, expected)
def test_existing_boxcox_column(self):
""" Test behavior for existing boxcox column """
self.frame.box_cox("y", 0.3)
with self.assertRaisesRegexp(
Exception, "duplicate column name"):
self.frame.box_cox("y", 0.3)
def test_existing_reverse_column(self):
""" Test behavior for existing reverse boxcox column """
self.frame.reverse_box_cox("y", 0.3)
with self.assertRaisesRegexp(
Exception, "duplicate column name"):
self.frame.reverse_box_cox("y", 0.3)
@unittest.skip("Req not clear")
def test_negative_col_positive_lambda(self):
"""Test behaviour for negative input column and positive lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
frame.box_cox("y", 1)
actual = frame.to_pandas()["y_lambda_1.0"].tolist()
expected = [-2.0, -3.0, 0]
self.assertItemsEqual(actual, expected)
@unittest.skip("Req not clear")
    def test_negative_col_fractional_lambda(self):
        """Test behaviour for negative input column and fractional lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
with self.assertRaises(Exception):
frame.box_cox("y", 0.1)
@unittest.skip("Req not clear")
def test_negative_col_zero_lambda(self):
"""Test behaviour for negative input column and positive lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
with self.assertRaises(Exception):
frame.box_cox("y")
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
vmagamedov/kinko | kinko/compile/incremental_dom.py | 1 | 9185 | from json.encoder import encode_basestring
from slimit import ast as js
from ..types import NamedArgMeta, VarArgsMeta, VarNamedArgsMeta
from ..utils import split_args, normalize_args
from ..nodes import Tuple, Symbol, Placeholder, String, Number
from ..utils import Environ
from ..compat import text_type
from ..checker import HTML_TAG_TYPE, GET_TYPE, IF1_TYPE, IF2_TYPE, JOIN1_TYPE
from ..checker import JOIN2_TYPE, get_type, DEF_TYPE, EACH_TYPE
from ..checker import returns_markup, contains_markup
from ..constant import SELF_CLOSING_ELEMENTS
def _str(value):
return js.String(encode_basestring(value))
def _text(value):
return js.ExprStatement(js.FunctionCall(js.Identifier('text'), [value]))
def _ctx_var(value):
return js.BracketAccessor(js.Identifier('ctx'), _str(value))
def _yield_writes(env, node):
if returns_markup(node):
for item in compile_stmt(env, node):
yield item
else:
yield _text(compile_expr(env, node))
def _el_open(tag, key=None, attrs=None, self_close=False):
fn = 'elementVoid' if self_close else 'elementOpen'
return js.ExprStatement(js.FunctionCall(js.Identifier(fn), [
_str(tag),
_str(key or ''),
js.Array([]),
js.Array(attrs or []),
]))
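# For reference (illustrative): _el_open("div", attrs=[_str("class"), _str("x")])
# compiles to roughly:  elementOpen("div", "", [], ["class", "x"]);
# and the self_close=True variant emits elementVoid(...) instead.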
def _el_close(tag):
return js.ExprStatement(js.FunctionCall(js.Identifier('elementClose'),
[_str(tag)]))
def compile_if1_expr(env, node, test, then_):
test_expr = compile_expr(env, test)
then_expr = compile_expr(env, then_)
else_expr = js.Null(None)
return js.Conditional(test_expr, then_expr, else_expr)
def compile_if2_expr(env, node, test, then_, else_):
test_expr = compile_expr(env, test)
then_expr = compile_expr(env, then_)
else_expr = compile_expr(env, else_)
return js.Conditional(test_expr, then_expr, else_expr)
def compile_get_expr(env, node, obj, attr):
obj_expr = compile_expr(env, obj)
return js.BracketAccessor(obj_expr, _str(attr.name))
def compile_func_expr(env, node, *norm_args):
sym, args = node.values[0], node.values[1:]
pos_args, kw_args = split_args(args)
name_expr = js.DotAccessor(js.Identifier('builtins'),
js.Identifier(sym.name))
compiled_args = [compile_expr(env, value)
for value in pos_args]
compiled_args.append(js.Object([
js.Label(_str(text_type(key)), compile_expr(env, value))
for key, value in kw_args.items()
]))
return js.FunctionCall(name_expr, compiled_args)
EXPR_TYPES = {
IF1_TYPE: compile_if1_expr,
IF2_TYPE: compile_if2_expr,
GET_TYPE: compile_get_expr,
}
def compile_expr(env, node):
if isinstance(node, Tuple):
sym, args = node.values[0], node.values[1:]
assert sym.__type__
pos_args, kw_args = split_args(args)
norm_args = normalize_args(sym.__type__, pos_args, kw_args)
proc = EXPR_TYPES.get(sym.__type__, compile_func_expr)
return proc(env, node, *norm_args)
elif isinstance(node, Symbol):
if node.name in env:
return js.Identifier(env[node.name])
else:
return _ctx_var(node.name)
elif isinstance(node, Placeholder):
return js.Identifier(env[node.name])
elif isinstance(node, String):
return _str(text_type(node.value))
elif isinstance(node, Number):
return js.Number(text_type(node.value))
else:
raise TypeError('Unable to compile {!r} of type {!r} as expression'
.format(node, type(node)))
def compile_def_stmt(env, node, name_sym, body):
args = [a.__arg_name__ for a in get_type(node).__args__]
with env.push(args):
yield js.FuncDecl(js.Identifier(name_sym.name),
[js.Identifier(env[arg]) for arg in args],
list(compile_stmt(env, body)))
def compile_html_tag_stmt(env, node, attrs, body):
tag_name = node.values[0].name
self_closing = tag_name in SELF_CLOSING_ELEMENTS
compiled_attrs = []
for key, value in attrs.items():
compiled_attrs.append(_str(text_type(key)))
compiled_attrs.append(compile_expr(env, value))
yield _el_open(tag_name, None, compiled_attrs,
self_close=self_closing)
if self_closing:
assert not body, ('Positional args are not expected in the '
'self-closing elements')
return
for arg in body:
for item in _yield_writes(env, arg):
yield item
yield _el_close(tag_name)
def compile_if1_stmt(env, node, test, then_):
test_expr = compile_expr(env, test)
yield js.If(test_expr, js.Block(list(_yield_writes(env, then_))), None)
def compile_if2_stmt(env, node, test, then_, else_):
test_expr = compile_expr(env, test)
yield js.If(test_expr, js.Block(list(_yield_writes(env, then_))),
js.Block(list(_yield_writes(env, else_))))
def compile_each_stmt(env, node, var, col, body):
col_expr = compile_expr(env, col)
with env.push(['_i']):
i_expr = js.Identifier(env['_i'])
with env.push([var.name]):
var_stmt = js.VarStatement([
js.Assign('=', js.Identifier(env[var.name]),
js.BracketAccessor(col_expr, i_expr)),
])
yield js.For(
js.VarStatement([js.VarDecl(i_expr, js.Number('0'))]),
js.BinOp('<', i_expr,
js.DotAccessor(col_expr,
js.Identifier('length'))),
js.UnaryOp('++', i_expr, postfix=True),
js.Block([var_stmt] + list(compile_stmt(env, body))),
)
def compile_join1_stmt(env, node, col):
for value in col.values:
for item in _yield_writes(env, value):
yield item
def compile_join2_stmt(env, node, sep, col):
for i, value in enumerate(col.values):
if i:
yield _text(_str(sep.value))
for item in _yield_writes(env, value):
yield item
STMT_TYPES = {
DEF_TYPE: compile_def_stmt,
HTML_TAG_TYPE: compile_html_tag_stmt,
IF1_TYPE: compile_if1_stmt,
IF2_TYPE: compile_if2_stmt,
EACH_TYPE: compile_each_stmt,
JOIN1_TYPE: compile_join1_stmt,
JOIN2_TYPE: compile_join2_stmt,
}
def compile_func_arg(env, type_, value):
if contains_markup(type_):
return js.FuncExpr(None, [], list(compile_stmt(env, value)))
else:
return compile_expr(env, value)
def compile_func_stmt(env, node, *norm_args):
sym = node.values[0]
arg_exprs = []
for arg_type, arg_value in zip(sym.__type__.__args__, norm_args):
if isinstance(arg_type, NamedArgMeta):
type_ = arg_type.__arg_type__
arg = compile_func_arg(env, type_, arg_value)
elif isinstance(arg_type, VarArgsMeta):
type_ = arg_type.__arg_type__
arg = js.Array([compile_func_arg(env, type_, v)
for v in arg_value])
elif isinstance(arg_type, VarNamedArgsMeta):
type_ = arg_type.__arg_type__
arg = js.Object([js.Label(_str(k), compile_func_arg(env, type_, v))
for k, v in arg_value.items()])
else:
arg = compile_func_arg(env, arg_type, arg_value)
arg_exprs.append(arg)
if sym.ns:
if sym.ns == '.':
name_expr = js.Identifier(sym.rel)
else:
name_expr = js.DotAccessor(js.Identifier(sym.ns),
js.Identifier(sym.rel))
else:
name_expr = js.DotAccessor(js.Identifier('builtins'),
js.Identifier(sym.name))
yield js.ExprStatement(js.FunctionCall(name_expr, arg_exprs))
def compile_stmt(env, node):
if isinstance(node, Tuple):
sym, args = node.values[0], node.values[1:]
assert sym.__type__
pos_args, kw_args = split_args(args)
norm_args = normalize_args(sym.__type__, pos_args, kw_args)
proc = STMT_TYPES.get(sym.__type__, compile_func_stmt)
for item in proc(env, node, *norm_args):
yield item
elif isinstance(node, Symbol):
if node.name in env:
yield _text(js.Identifier(env[node.name]))
else:
yield _text(_ctx_var(node.name))
elif isinstance(node, Placeholder):
yield js.ExprStatement(js.FunctionCall(js.Identifier(env[node.name]),
[]))
elif isinstance(node, String):
yield _text(js.String(node.value))
elif isinstance(node, Number):
yield _text(js.Number(node.value))
else:
raise TypeError('Unable to compile {!r} of type {!r} as statement'
.format(node, type(node)))
def compile_stmts(env, nodes):
for node in nodes:
for item in compile_stmt(env, node):
yield item
def compile_module(body):
env = Environ()
mod = js.Program(list(compile_stmts(env, body.values)))
return mod
def dumps(node):
return node.to_ecma() + '\n'
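# Typical entry point (illustrative; `body` is a type-checked kinko module
# AST, as produced by the parser/checker pipeline):
#   mod = compile_module(body)
#   js_source = dumps(mod)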
| bsd-3-clause |
mottosso/pyblish-magenta | pyblish_magenta/vendor/capture.py | 1 | 15009 | """Maya Capture
Playblasting with independent viewport, camera and display options
"""
import re
import sys
import contextlib
from maya import cmds
version_info = (1, 1, 0)
__version__ = "%s.%s.%s" % version_info
__license__ = "MIT"
def capture(camera=None,
width=None,
height=None,
filename=None,
start_frame=None,
end_frame=None,
frame=None,
format='qt',
compression='h264',
off_screen=False,
viewer=True,
isolate=None,
maintain_aspect_ratio=True,
overwrite=False,
raw_frame_numbers=False,
camera_options=None,
viewport_options=None,
display_options=None,
complete_filename=None):
"""Playblast in an independent panel
Arguments:
camera (str, optional): Name of camera, defaults to "persp"
width (int, optional): Width of output in pixels
height (int, optional): Height of output in pixels
filename (str, optional): Name of output file. If
none is specified, no files are saved.
start_frame (float, optional): Defaults to current start frame.
end_frame (float, optional): Defaults to current end frame.
frame (float or tuple, optional): A single frame or list of frames.
Use this to capture a single frame or an arbitrary sequence of
frames.
format (str, optional): Name of format, defaults to "qt".
compression (str, optional): Name of compression, defaults to "h264"
off_screen (bool, optional): Whether or not to playblast off screen
viewer (bool, optional): Display results in native player
isolate (list): List of nodes to isolate upon capturing
maintain_aspect_ratio (bool, optional): Modify height in order to
maintain aspect ratio.
overwrite (bool, optional): Whether or not to overwrite if file
            already exists. If disabled and the file exists, an error will be
raised.
raw_frame_numbers (bool, optional): Whether or not to use the exact
frame numbers from the scene or capture to a sequence starting at
zero. Defaults to False. When set to True `viewer` can't be used
and will be forced to False.
camera_options (CameraOptions, optional): Supplied camera options,
using :class:`CameraOptions`
viewport_options (ViewportOptions, optional): Supplied viewport
options, using :class:`ViewportOptions`
display_options (DisplayOptions, optional): Supplied display
options, using :class:`DisplayOptions`
complete_filename (str, optional): Exact name of output file. Use this
to override the output of `filename` so it excludes frame padding.
Example:
>>> # Launch default capture
>>> capture()
>>> # Launch capture with custom viewport settings
>>> view_opts = ViewportOptions()
>>> view_opts.grid = False
>>> view_opts.polymeshes = True
>>> view_opts.displayAppearance = "wireframe"
>>> cam_opts = CameraOptions()
>>> cam_opts.displayResolution = True
>>> capture('myCamera', 800, 600,
... viewport_options=view_opts,
... camera_options=cam_opts)
"""
camera = camera or "persp"
# Ensure camera exists
if not cmds.objExists(camera):
raise RuntimeError("Camera does not exist: {0}".format(camera))
width = width or cmds.getAttr("defaultResolution.width")
height = height or cmds.getAttr("defaultResolution.height")
if maintain_aspect_ratio:
ratio = cmds.getAttr("defaultResolution.deviceAspectRatio")
height = width / ratio
start_frame = start_frame or cmds.playbackOptions(minTime=True, query=True)
end_frame = end_frame or cmds.playbackOptions(maxTime=True, query=True)
# We need to wrap `completeFilename`, otherwise even when None is provided
# it will use filename as the exact name. Only when lacking as argument
# does it function correctly.
playblast_kwargs = dict()
if complete_filename:
playblast_kwargs['completeFilename'] = complete_filename
if frame:
playblast_kwargs['frame'] = frame
# (#21) Bugfix: `maya.cmds.playblast` suffers from undo bug where it
# always sets the currentTime to frame 1. By setting currentTime before
# the playblast call it'll undo correctly.
cmds.currentTime(cmds.currentTime(q=1))
    padding = 10  # Extend panel to account for the OS window manager
with _independent_panel(width=width + padding,
height=height + padding) as panel:
cmds.setFocus(panel)
with contextlib.nested(
_maintain_camera(panel, camera),
_applied_viewport_options(viewport_options, panel),
_applied_camera_options(camera_options, panel, camera),
_applied_display_options(display_options),
_isolated_nodes(isolate, panel),
_maintained_time()):
output = cmds.playblast(
compression=compression,
format=format,
percent=100,
quality=100,
viewer=viewer,
startTime=start_frame,
endTime=end_frame,
offScreen=off_screen,
forceOverwrite=overwrite,
filename=filename,
widthHeight=[width, height],
rawFrameNumbers=raw_frame_numbers,
**playblast_kwargs)
return output
def snap(*args, **kwargs):
"""Single frame playblast in an independent panel.
The arguments of `capture` are all valid here as well, except for
`start_frame` and `end_frame`.
Arguments:
frame (float, optional): The frame to snap. If not provided current
frame is used.
clipboard (bool, optional): Whether to add the output image to the
global clipboard. This allows to easily paste the snapped image
into another application, eg. into Photoshop.
Keywords:
See `capture`.
"""
# capture single frame
frame = kwargs.pop('frame', cmds.currentTime(q=1))
kwargs['start_frame'] = frame
kwargs['end_frame'] = frame
kwargs['frame'] = frame
if not isinstance(frame, (int, float)):
raise TypeError("frame must be a single frame (integer or float). "
"Use `capture()` for sequences.")
# override capture defaults
format = kwargs.pop('format', "image")
compression = kwargs.pop('compression', "png")
viewer = kwargs.pop('viewer', False)
raw_frame_numbers = kwargs.pop('raw_frame_numbers', True)
kwargs['compression'] = compression
kwargs['format'] = format
kwargs['viewer'] = viewer
kwargs['raw_frame_numbers'] = raw_frame_numbers
# pop snap only keyword arguments
clipboard = kwargs.pop('clipboard', False)
# perform capture
output = capture(*args, **kwargs)
def replace(m):
"""Substitute # with frame number"""
return str(int(frame)).zfill(len(m.group()))
output = re.sub("#+", replace, output)
# add image to clipboard
if clipboard:
_image_to_clipboard(output)
return output
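# A minimal usage sketch, not part of the original module: inside a Maya
# session with this file importable as `capture`, a single snapshot could be
# taken like this (the camera name, sizes and kwargs are illustrative).
#
#   import capture
#   path = capture.snap(camera="persp", width=1920, height=1080,
#                       clipboard=True)
#   print(path)  # resolved file path with the frame number substituted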
class ViewportOptions:
"""Viewport options for :func:`capture`"""
useDefaultMaterial = False
wireframeOnShaded = False
displayAppearance = 'smoothShaded'
selectionHiliteDisplay = False
headsUpDisplay = True
# Visibility flags
nurbsCurves = False
nurbsSurfaces = False
polymeshes = True
subdivSurfaces = False
cameras = False
lights = False
grid = False
joints = False
ikHandles = False
deformers = False
dynamics = False
fluids = False
hairSystems = False
follicles = False
nCloths = False
nParticles = False
nRigids = False
dynamicConstraints = False
locators = False
manipulators = False
dimensions = False
handles = False
pivots = False
textures = False
strokes = False
class CameraOptions:
"""Camera settings for :func:`capture`
Camera options are applied to the specified camera and
then reverted once the capture is complete.
"""
displayGateMask = False
displayResolution = False
displayFilmGate = False
displayFieldChart = False
displaySafeAction = False
displaySafeTitle = False
displayFilmPivot = False
displayFilmOrigin = False
overscan = 1.0
class DisplayOptions:
"""Display options for :func:`capture`
Use this struct for background color, anti-alias and other
display-related options.
"""
displayGradient = True
background = (0.631, 0.631, 0.631)
backgroundTop = (0.535, 0.617, 0.702)
backgroundBottom = (0.052, 0.052, 0.052)
def _parse_options(options):
"""Return dictionary of properties from option-objects"""
opts = dict()
for attr in dir(options):
if attr.startswith("__"):
continue
opts[attr] = getattr(options, attr)
return opts
@contextlib.contextmanager
def _independent_panel(width, height):
"""Create capture-window context without decorations
Arguments:
width (int): Width of panel
height (int): Height of panel
Example:
>>> with _independent_panel(800, 600):
... cmds.capture()
"""
# center panel on screen
screen_width, screen_height = _get_screen_size()
topLeft = [int((screen_height-height)/2.0),
int((screen_width-width)/2.0)]
window = cmds.window(width=width,
height=height,
topLeftCorner=topLeft,
menuBarVisible=False,
titleBar=False)
cmds.paneLayout()
panel = cmds.modelPanel(menuBarVisible=False,
label='CapturePanel')
# Hide icons under panel menus
bar_layout = cmds.modelPanel(panel, q=True, barLayout=True)
cmds.frameLayout(bar_layout, e=True, collapse=True)
cmds.showWindow(window)
# Set the modelEditor of the modelPanel as the active view so it takes
# the playback focus. This may be redundant with the `refresh` call below.
editor = cmds.modelPanel(panel, query=True, modelEditor=True)
cmds.modelEditor(editor, e=1, activeView=True)
# Force a draw refresh of Maya so it keeps focus on the new panel
# This focus is required to force preview playback in the independent panel
cmds.refresh(force=True)
try:
yield panel
finally:
# Delete the panel to fix memory leak (about 5 mb per capture)
cmds.deleteUI(panel, panel=True)
cmds.deleteUI(window)
@contextlib.contextmanager
def _applied_viewport_options(options, panel):
"""Context manager for applying `options` to `panel`"""
options = options or ViewportOptions()
options = _parse_options(options)
cmds.modelEditor(panel,
edit=True,
allObjects=False,
grid=False,
manipulators=False)
cmds.modelEditor(panel, edit=True, **options)
yield
@contextlib.contextmanager
def _applied_camera_options(options, panel, camera):
"""Context manager for applying `options` to `camera`"""
options = options or CameraOptions()
options = _parse_options(options)
old_options = dict()
# Iterate over a copy so entries can be removed while looping; `options`
# is a plain dict here, so pop() (not delattr) removes unsupported keys.
for opt in options.copy():
try:
old_options[opt] = cmds.getAttr(camera + "." + opt)
except:
sys.stderr.write("Could not get camera attribute "
"for capture: %s\n" % opt)
options.pop(opt)
for opt, value in options.iteritems():
cmds.setAttr(camera + "." + opt, value)
try:
yield
finally:
if old_options:
for opt, value in old_options.iteritems():
cmds.setAttr(camera + "." + opt, value)
@contextlib.contextmanager
def _applied_display_options(options):
"""Context manager for setting background color display options."""
options = options or DisplayOptions()
colors = ['background', 'backgroundTop', 'backgroundBottom']
preferences = ['displayGradient']
# Store current settings
original = {}
for color in colors:
original[color] = cmds.displayRGBColor(color, query=True) or []
for preference in preferences:
original[preference] = cmds.displayPref(query=True, **{preference: True})
# Apply settings
for color in colors:
value = getattr(options, color)
cmds.displayRGBColor(color, *value)
for preference in preferences:
value = getattr(options, preference)
cmds.displayPref(**{preference: value})
try:
yield
finally:
# Restore original settings
for color in colors:
cmds.displayRGBColor(color, *original[color])
for preference in preferences:
cmds.displayPref(**{preference: original[preference]})
@contextlib.contextmanager
def _isolated_nodes(nodes, panel):
"""Context manager for isolating `nodes` in `panel`"""
if nodes is not None:
cmds.isolateSelect(panel, state=True)
for obj in nodes:
cmds.isolateSelect(panel, addDagObject=obj)
yield
@contextlib.contextmanager
def _maintained_time():
"""Context manager for preserving (resetting) the time after the context"""
current_time = cmds.currentTime(query=1)
try:
yield
finally:
cmds.currentTime(current_time)
@contextlib.contextmanager
def _maintain_camera(panel, camera):
state = {}
if not _in_standalone():
cmds.lookThru(panel, camera)
else:
state = dict((camera, cmds.getAttr(camera + ".rnd"))
for camera in cmds.ls(type="camera"))
cmds.setAttr(camera + ".rnd", True)
try:
yield
finally:
for camera, renderable in state.iteritems():
cmds.setAttr(camera + ".rnd", renderable)
def _image_to_clipboard(path):
"""Copies the image at path to the system's global clipboard."""
if _in_standalone():
raise Exception("Cannot copy to clipboard from Maya Standalone")
import PySide.QtGui
image = PySide.QtGui.QImage(path)
clipboard = PySide.QtGui.QApplication.clipboard()
clipboard.setImage(image, mode=PySide.QtGui.QClipboard.Clipboard)
def _get_screen_size():
"""Return available screen size without space occupied by taskbar"""
if _in_standalone():
return [0, 0]
import PySide.QtGui
rect = PySide.QtGui.QDesktopWidget().screenGeometry(-1)
return [rect.width(), rect.height()]
def _in_standalone():
return not hasattr(cmds, "about") or cmds.about(batch=True)
| lgpl-3.0 |
embecosm/bachmann-gdb | gdb/python/lib/gdb/prompt.py | 137 | 4210 | # Extended prompt utilities.
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Extended prompt library functions."""
import gdb
import os
def _prompt_pwd(ignore):
"The current working directory."
return os.getcwdu()
def _prompt_object_attr(func, what, attr, nattr):
"""Internal worker for fetching GDB attributes."""
if attr is None:
attr = nattr
try:
obj = func()
except gdb.error:
return '<no %s>' % what
if hasattr(obj, attr):
result = getattr(obj, attr)
if callable(result):
result = result()
return result
else:
return '<no attribute %s on current %s>' % (attr, what)
def _prompt_frame(attr):
"The selected frame; an argument names a frame parameter."
return _prompt_object_attr(gdb.selected_frame, 'frame', attr, 'name')
def _prompt_thread(attr):
"The selected thread; an argument names a thread parameter."
return _prompt_object_attr(gdb.selected_thread, 'thread', attr, 'num')
def _prompt_version(attr):
"The version of GDB."
return gdb.VERSION
def _prompt_esc(attr):
"The ESC character."
return '\033'
def _prompt_bs(attr):
"A backslash."
return '\\'
def _prompt_n(attr):
"A newline."
return '\n'
def _prompt_r(attr):
"A carriage return."
return '\r'
def _prompt_param(attr):
"A parameter's value; the argument names the parameter."
return gdb.parameter(attr)
def _prompt_noprint_begin(attr):
"Begins a sequence of non-printing characters."
return '\001'
def _prompt_noprint_end(attr):
"Ends a sequence of non-printing characters."
return '\002'
prompt_substitutions = {
'e': _prompt_esc,
'\\': _prompt_bs,
'n': _prompt_n,
'r': _prompt_r,
'v': _prompt_version,
'w': _prompt_pwd,
'f': _prompt_frame,
't': _prompt_thread,
'p': _prompt_param,
'[': _prompt_noprint_begin,
']': _prompt_noprint_end
}
def prompt_help():
"""Generate help dynamically from the __doc__ strings of attribute
functions."""
result = ''
keys = sorted(prompt_substitutions.keys())
for key in keys:
result += ' \\%s\t%s\n' % (key, prompt_substitutions[key].__doc__)
result += """
A substitution can be used in a simple form, like "\\f".
An argument can also be passed to it, like "\\f{name}".
The meaning of the argument depends on the particular substitution."""
return result
def substitute_prompt(prompt):
"Perform substitutions on PROMPT."
result = ''
plen = len(prompt)
i = 0
while i < plen:
if prompt[i] == '\\':
i = i + 1
if i >= plen:
break
cmdch = prompt[i]
if cmdch in prompt_substitutions:
cmd = prompt_substitutions[cmdch]
if i + 1 < plen and prompt[i + 1] == '{':
j = i + 1
while j < plen and prompt[j] != '}':
j = j + 1
# Just ignore formatting errors.
if j >= plen or prompt[j] != '}':
arg = None
else:
arg = prompt[i + 2 : j]
i = j
else:
arg = None
result += str(cmd(arg))
else:
# Unrecognized escapes are turned into the escaped
# character itself.
result += prompt[i]
else:
result += prompt[i]
i = i + 1
return result
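# A hedged example of how these helpers compose, not part of the original
# file: with this module importable inside GDB's embedded Python, a prompt
# string could be expanded like this (the prompt text is illustrative).
#
#   import gdb.prompt
#   print(gdb.prompt.substitute_prompt("\\w \\v> "))  # e.g. "/home/user 7.6> "
#   print(gdb.prompt.prompt_help())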
| gpl-2.0 |
dfalt974/SickRage | lib/unidecode/x096.py | 252 | 4610 | data = (
'Fa ', # 0x00
'Ge ', # 0x01
'He ', # 0x02
'Kun ', # 0x03
'Jiu ', # 0x04
'Yue ', # 0x05
'Lang ', # 0x06
'Du ', # 0x07
'Yu ', # 0x08
'Yan ', # 0x09
'Chang ', # 0x0a
'Xi ', # 0x0b
'Wen ', # 0x0c
'Hun ', # 0x0d
'Yan ', # 0x0e
'E ', # 0x0f
'Chan ', # 0x10
'Lan ', # 0x11
'Qu ', # 0x12
'Hui ', # 0x13
'Kuo ', # 0x14
'Que ', # 0x15
'Ge ', # 0x16
'Tian ', # 0x17
'Ta ', # 0x18
'Que ', # 0x19
'Kan ', # 0x1a
'Huan ', # 0x1b
'Fu ', # 0x1c
'Fu ', # 0x1d
'Le ', # 0x1e
'Dui ', # 0x1f
'Xin ', # 0x20
'Qian ', # 0x21
'Wu ', # 0x22
'Yi ', # 0x23
'Tuo ', # 0x24
'Yin ', # 0x25
'Yang ', # 0x26
'Dou ', # 0x27
'E ', # 0x28
'Sheng ', # 0x29
'Ban ', # 0x2a
'Pei ', # 0x2b
'Keng ', # 0x2c
'Yun ', # 0x2d
'Ruan ', # 0x2e
'Zhi ', # 0x2f
'Pi ', # 0x30
'Jing ', # 0x31
'Fang ', # 0x32
'Yang ', # 0x33
'Yin ', # 0x34
'Zhen ', # 0x35
'Jie ', # 0x36
'Cheng ', # 0x37
'E ', # 0x38
'Qu ', # 0x39
'Di ', # 0x3a
'Zu ', # 0x3b
'Zuo ', # 0x3c
'Dian ', # 0x3d
'Ling ', # 0x3e
'A ', # 0x3f
'Tuo ', # 0x40
'Tuo ', # 0x41
'Po ', # 0x42
'Bing ', # 0x43
'Fu ', # 0x44
'Ji ', # 0x45
'Lu ', # 0x46
'Long ', # 0x47
'Chen ', # 0x48
'Xing ', # 0x49
'Duo ', # 0x4a
'Lou ', # 0x4b
'Mo ', # 0x4c
'Jiang ', # 0x4d
'Shu ', # 0x4e
'Duo ', # 0x4f
'Xian ', # 0x50
'Er ', # 0x51
'Gui ', # 0x52
'Yu ', # 0x53
'Gai ', # 0x54
'Shan ', # 0x55
'Xun ', # 0x56
'Qiao ', # 0x57
'Xing ', # 0x58
'Chun ', # 0x59
'Fu ', # 0x5a
'Bi ', # 0x5b
'Xia ', # 0x5c
'Shan ', # 0x5d
'Sheng ', # 0x5e
'Zhi ', # 0x5f
'Pu ', # 0x60
'Dou ', # 0x61
'Yuan ', # 0x62
'Zhen ', # 0x63
'Chu ', # 0x64
'Xian ', # 0x65
'Tou ', # 0x66
'Nie ', # 0x67
'Yun ', # 0x68
'Xian ', # 0x69
'Pei ', # 0x6a
'Pei ', # 0x6b
'Zou ', # 0x6c
'Yi ', # 0x6d
'Dui ', # 0x6e
'Lun ', # 0x6f
'Yin ', # 0x70
'Ju ', # 0x71
'Chui ', # 0x72
'Chen ', # 0x73
'Pi ', # 0x74
'Ling ', # 0x75
'Tao ', # 0x76
'Xian ', # 0x77
'Lu ', # 0x78
'Sheng ', # 0x79
'Xian ', # 0x7a
'Yin ', # 0x7b
'Zhu ', # 0x7c
'Yang ', # 0x7d
'Reng ', # 0x7e
'Shan ', # 0x7f
'Chong ', # 0x80
'Yan ', # 0x81
'Yin ', # 0x82
'Yu ', # 0x83
'Ti ', # 0x84
'Yu ', # 0x85
'Long ', # 0x86
'Wei ', # 0x87
'Wei ', # 0x88
'Nie ', # 0x89
'Dui ', # 0x8a
'Sui ', # 0x8b
'An ', # 0x8c
'Huang ', # 0x8d
'Jie ', # 0x8e
'Sui ', # 0x8f
'Yin ', # 0x90
'Gai ', # 0x91
'Yan ', # 0x92
'Hui ', # 0x93
'Ge ', # 0x94
'Yun ', # 0x95
'Wu ', # 0x96
'Wei ', # 0x97
'Ai ', # 0x98
'Xi ', # 0x99
'Tang ', # 0x9a
'Ji ', # 0x9b
'Zhang ', # 0x9c
'Dao ', # 0x9d
'Ao ', # 0x9e
'Xi ', # 0x9f
'Yin ', # 0xa0
'[?] ', # 0xa1
'Rao ', # 0xa2
'Lin ', # 0xa3
'Tui ', # 0xa4
'Deng ', # 0xa5
'Pi ', # 0xa6
'Sui ', # 0xa7
'Sui ', # 0xa8
'Yu ', # 0xa9
'Xian ', # 0xaa
'Fen ', # 0xab
'Ni ', # 0xac
'Er ', # 0xad
'Ji ', # 0xae
'Dao ', # 0xaf
'Xi ', # 0xb0
'Yin ', # 0xb1
'E ', # 0xb2
'Hui ', # 0xb3
'Long ', # 0xb4
'Xi ', # 0xb5
'Li ', # 0xb6
'Li ', # 0xb7
'Li ', # 0xb8
'Zhui ', # 0xb9
'He ', # 0xba
'Zhi ', # 0xbb
'Zhun ', # 0xbc
'Jun ', # 0xbd
'Nan ', # 0xbe
'Yi ', # 0xbf
'Que ', # 0xc0
'Yan ', # 0xc1
'Qian ', # 0xc2
'Ya ', # 0xc3
'Xiong ', # 0xc4
'Ya ', # 0xc5
'Ji ', # 0xc6
'Gu ', # 0xc7
'Huan ', # 0xc8
'Zhi ', # 0xc9
'Gou ', # 0xca
'Jun ', # 0xcb
'Ci ', # 0xcc
'Yong ', # 0xcd
'Ju ', # 0xce
'Chu ', # 0xcf
'Hu ', # 0xd0
'Za ', # 0xd1
'Luo ', # 0xd2
'Yu ', # 0xd3
'Chou ', # 0xd4
'Diao ', # 0xd5
'Sui ', # 0xd6
'Han ', # 0xd7
'Huo ', # 0xd8
'Shuang ', # 0xd9
'Guan ', # 0xda
'Chu ', # 0xdb
'Za ', # 0xdc
'Yong ', # 0xdd
'Ji ', # 0xde
'Xi ', # 0xdf
'Chou ', # 0xe0
'Liu ', # 0xe1
'Li ', # 0xe2
'Nan ', # 0xe3
'Xue ', # 0xe4
'Za ', # 0xe5
'Ji ', # 0xe6
'Ji ', # 0xe7
'Yu ', # 0xe8
'Yu ', # 0xe9
'Xue ', # 0xea
'Na ', # 0xeb
'Fou ', # 0xec
'Se ', # 0xed
'Mu ', # 0xee
'Wen ', # 0xef
'Fen ', # 0xf0
'Pang ', # 0xf1
'Yun ', # 0xf2
'Li ', # 0xf3
'Li ', # 0xf4
'Ang ', # 0xf5
'Ling ', # 0xf6
'Lei ', # 0xf7
'An ', # 0xf8
'Bao ', # 0xf9
'Meng ', # 0xfa
'Dian ', # 0xfb
'Dang ', # 0xfc
'Xing ', # 0xfd
'Wu ', # 0xfe
'Zhao ', # 0xff
)
| gpl-3.0 |
xzYue/odoo | openerp/tools/image.py | 172 | 10660 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from PIL import Image
from PIL import ImageEnhance
from random import randint
# ----------------------------------------
# Image resizing
# ----------------------------------------
def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=False):
""" Function to resize an image. The image will be resized to the given
size, while keeping the aspect ratios, and holes in the image will be
filled with transparent background. The image will not be stretched if
smaller than the expected size.
Steps of the resizing:
- Compute width and height if not specified.
- if avoid_if_small: if both image sizes are smaller than the requested
sizes, the original image is returned. This is used to avoid adding
transparent content around images that we do not want to alter but
just resize if too big. This is used for example when storing images
in the 'image' field: we keep the original image, resized to a maximal
size, without adding transparent content around it if smaller.
- create a thumbnail of the source image through using the thumbnail
function. Aspect ratios are preserved when using it. Note that if the
source image is smaller than the expected size, it will not be
extended, but filled to match the size.
- create a transparent background that will hold the final image.
- paste the thumbnail on the transparent background and center it.
:param base64_source: base64-encoded version of the source
image; if False, returns False
:param size: 2-tuple(width, height). A None value for either width or
height means an automatically computed value, based respectively
on the height or width of the source image.
:param encoding: the output encoding
:param filetype: the output filetype, by default the source image's
:type filetype: str, any PIL image format (supported for creation)
:param avoid_if_small: do not resize if image height and width
are smaller than the expected size.
"""
if not base64_source:
return False
if size == (None, None):
return base64_source
image_stream = StringIO.StringIO(base64_source.decode(encoding))
image = Image.open(image_stream)
# store filetype here, as Image.new below will lose image.format
filetype = (filetype or image.format).upper()
filetype = {
'BMP': 'PNG',
}.get(filetype, filetype)
asked_width, asked_height = size
if asked_width is None:
asked_width = int(image.size[0] * (float(asked_height) / image.size[1]))
if asked_height is None:
asked_height = int(image.size[1] * (float(asked_width) / image.size[0]))
size = asked_width, asked_height
# check image size: do not create a thumbnail if avoiding smaller images
if avoid_if_small and image.size[0] <= size[0] and image.size[1] <= size[1]:
return base64_source
if image.size != size:
image = image_resize_and_sharpen(image, size)
if image.mode not in ["1", "L", "P", "RGB", "RGBA"]:
image = image.convert("RGB")
background_stream = StringIO.StringIO()
image.save(background_stream, filetype)
return background_stream.getvalue().encode(encoding)
def image_resize_and_sharpen(image, size, preserve_aspect_ratio=False, factor=2.0):
"""
Create a thumbnail by resizing while keeping ratio.
A sharpen filter is applied for a better looking result.
:param image: PIL.Image.Image()
:param size: 2-tuple(width, height)
:param preserve_aspect_ratio: boolean (default: False)
:param factor: Sharpen factor (default: 2.0)
"""
if image.mode != 'RGBA':
image = image.convert('RGBA')
image.thumbnail(size, Image.ANTIALIAS)
if preserve_aspect_ratio:
size = image.size
sharpener = ImageEnhance.Sharpness(image)
resized_image = sharpener.enhance(factor)
# create a transparent image for background and paste the image on it
image = Image.new('RGBA', size, (255, 255, 255, 0))
image.paste(resized_image, ((size[0] - resized_image.size[0]) / 2, (size[1] - resized_image.size[1]) / 2))
return image
def image_save_for_web(image, fp=None, format=None):
"""
Save image optimized for web usage.
:param image: PIL.Image.Image()
:param fp: File name or file object. If not specified, a bytestring is returned.
:param format: File format if could not be deduced from image.
"""
opt = dict(format=image.format or format)
if image.format == 'PNG':
opt.update(optimize=True)
alpha = False
if image.mode in ('RGBA', 'LA') or (image.mode == 'P' and 'transparency' in image.info):
alpha = image.convert('RGBA').split()[-1]
if image.mode != 'P':
# Floyd Steinberg dithering by default
image = image.convert('RGBA').convert('P', palette=Image.WEB, colors=256)
if alpha:
image.putalpha(alpha)
elif image.format == 'JPEG':
opt.update(optimize=True, quality=80)
if fp:
image.save(fp, **opt)
else:
img = StringIO.StringIO()
image.save(img, **opt)
return img.getvalue()
def image_resize_image_big(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=True):
""" Wrapper on image_resize_image, to resize images larger than the standard
'big' image size: 1024x1024px.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_medium(base64_source, size=(128, 128), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'medium'
image size: 180x180.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_small(base64_source, size=(64, 64), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'small' image
size: 50x50.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
# ----------------------------------------
# Colors
# ---------------------------------------
def image_colorize(original, randomize=True, color=(255, 255, 255)):
""" Add a color to the transparent background of an image.
:param original: file object on the original image file
:param randomize: randomize the background color
:param color: background-color, if not randomize
"""
# create a new image, based on the original one
original = Image.open(StringIO.StringIO(original))
image = Image.new('RGB', original.size)
# generate the background color, paste it as the background
if randomize:
color = (randint(32, 224), randint(32, 224), randint(32, 224))
image.paste(color)
image.paste(original, mask=original)
# return the new image
buffer = StringIO.StringIO()
image.save(buffer, 'PNG')
return buffer.getvalue()
# ----------------------------------------
# Misc image tools
# ---------------------------------------
def image_get_resized_images(base64_source, return_big=False, return_medium=True, return_small=True,
big_name='image', medium_name='image_medium', small_name='image_small',
avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False):
""" Standard tool function that returns a dictionary containing the
big, medium and small versions of the source image. This function
is meant to be used for the methods of functional fields for
models using images.
Default parameters are given to be used for the getter of functional
image fields, for example with res.users or res.partner. It returns
only image_medium and image_small values, to update those fields.
:param base64_source: base64-encoded version of the source
image; if False, all returned values will be False
:param return_{..}: if set, computes and returns the related resized
version of the image
:param {..}_name: key of the resized image in the return dictionary;
'image', 'image_medium' and 'image_small' by default.
:param avoid_resize_[..]: see avoid_if_small parameter
:return return_dict: dictionary with resized images, depending on
previous parameters.
"""
return_dict = dict()
if return_big:
return_dict[big_name] = image_resize_image_big(base64_source, avoid_if_small=avoid_resize_big)
if return_medium:
return_dict[medium_name] = image_resize_image_medium(base64_source, avoid_if_small=avoid_resize_medium)
if return_small:
return_dict[small_name] = image_resize_image_small(base64_source, avoid_if_small=avoid_resize_small)
return return_dict
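# A minimal usage sketch (an assumption, not part of the original module):
# given any base64-encoded source image, the standard resized variants could
# be fetched like this; the file name is illustrative.
#
#   source = file('logo.png', 'rb').read().encode('base64')
#   resized = image_get_resized_images(source, return_big=True)
#   # resized maps 'image', 'image_medium' and 'image_small' to
#   # base64-encoded image data ready to store on a record.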
if __name__=="__main__":
import sys
assert len(sys.argv)==3, 'Usage to Test: image.py SRC.png DEST.png'
img = file(sys.argv[1],'rb').read().encode('base64')
new = image_resize_image(img, (128,100))
file(sys.argv[2], 'wb').write(new.decode('base64'))
| agpl-3.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py | 101 | 3046 | """
Unit tests for degree centrality.
"""
from nose.tools import *
import networkx as nx
class TestDegreeCentrality:
def __init__(self):
self.K = nx.krackhardt_kite_graph()
self.P3 = nx.path_graph(3)
self.K5 = nx.complete_graph(5)
F = nx.Graph() # Florentine families
F.add_edge('Acciaiuoli','Medici')
F.add_edge('Castellani','Peruzzi')
F.add_edge('Castellani','Strozzi')
F.add_edge('Castellani','Barbadori')
F.add_edge('Medici','Barbadori')
F.add_edge('Medici','Ridolfi')
F.add_edge('Medici','Tornabuoni')
F.add_edge('Medici','Albizzi')
F.add_edge('Medici','Salviati')
F.add_edge('Salviati','Pazzi')
F.add_edge('Peruzzi','Strozzi')
F.add_edge('Peruzzi','Bischeri')
F.add_edge('Strozzi','Ridolfi')
F.add_edge('Strozzi','Bischeri')
F.add_edge('Ridolfi','Tornabuoni')
F.add_edge('Tornabuoni','Guadagni')
F.add_edge('Albizzi','Ginori')
F.add_edge('Albizzi','Guadagni')
F.add_edge('Bischeri','Guadagni')
F.add_edge('Guadagni','Lamberteschi')
self.F = F
G = nx.DiGraph()
G.add_edge(0,5)
G.add_edge(1,5)
G.add_edge(2,5)
G.add_edge(3,5)
G.add_edge(4,5)
G.add_edge(5,6)
G.add_edge(5,7)
G.add_edge(5,8)
self.G = G
def test_degree_centrality_1(self):
d = nx.degree_centrality(self.K5)
exact = dict(zip(range(5), [1]*5))
for n,dc in d.items():
assert_almost_equal(exact[n], dc)
def test_degree_centrality_2(self):
d = nx.degree_centrality(self.P3)
exact = {0:0.5, 1:1, 2:0.5}
for n,dc in d.items():
assert_almost_equal(exact[n], dc)
def test_degree_centrality_3(self):
d = nx.degree_centrality(self.K)
exact = {0:.444, 1:.444, 2:.333, 3:.667, 4:.333,
5:.556, 6:.556, 7:.333, 8:.222, 9:.111}
for n,dc in d.items():
assert_almost_equal(exact[n], float("%5.3f" % dc))
def test_degree_centrality_4(self):
d = nx.degree_centrality(self.F)
names = sorted(self.F.nodes())
dcs = [0.071, 0.214, 0.143, 0.214, 0.214, 0.071, 0.286,
0.071, 0.429, 0.071, 0.214, 0.214, 0.143, 0.286, 0.214]
exact = dict(zip(names, dcs))
for n,dc in d.items():
assert_almost_equal(exact[n], float("%5.3f" % dc))
def test_indegree_centrality(self):
d = nx.in_degree_centrality(self.G)
exact = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0,
5: 0.625, 6: 0.125, 7: 0.125, 8: 0.125}
for n,dc in d.items():
assert_almost_equal(exact[n], dc)
def test_outdegree_centrality(self):
d = nx.out_degree_centrality(self.G)
exact = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125,
4: 0.125, 5: 0.375, 6: 0.0, 7: 0.0, 8: 0.0}
for n,dc in d.items():
assert_almost_equal(exact[n], dc)
| agpl-3.0 |
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/f5/bigip_virtual_address.py | 16 | 16948 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_virtual_address
short_description: Manage LTM virtual addresses on a BIG-IP.
description:
- Manage LTM virtual addresses on a BIG-IP.
version_added: "2.4"
options:
address:
description:
- Virtual address. This value cannot be modified after it is set.
required: True
aliases:
- name
netmask:
description:
- Netmask of the provided virtual address. This value cannot be
modified after it is set.
default: 255.255.255.255
connection_limit:
description:
- Specifies the number of concurrent connections that the system
allows on this virtual address.
arp_state:
description:
- Specifies whether the system accepts ARP requests. When C(disabled),
specifies that the system does not accept ARP requests. Note that
both ARP and ICMP Echo must be disabled in order for forwarding
virtual servers using that virtual address to forward ICMP packets.
If C(enabled), then the packets are dropped.
choices:
- enabled
- disabled
auto_delete:
description:
- Specifies whether the system automatically deletes the virtual
address with the deletion of the last associated virtual server.
When C(disabled), specifies that the system leaves the virtual
address even when all associated virtual servers have been deleted.
When creating the virtual address, the default value is C(enabled).
choices:
- enabled
- disabled
icmp_echo:
description:
- Specifies how the system sends responses to (ICMP) echo requests
on a per-virtual address basis for enabling route advertisement.
When C(enabled), the BIG-IP system intercepts ICMP echo request
packets and responds to them directly. When C(disabled), the BIG-IP
system passes ICMP echo requests through to the backend servers.
When C(selective), the BIG-IP system internally enables or
disables responses based on virtual server state; C(when_any_available),
C(when_all_available), or C(always), regardless of the state of any
virtual servers.
choices:
- enabled
- disabled
- selective
state:
description:
- The virtual address state. If C(absent), an attempt to delete the
virtual address will be made. This will only succeed if this
virtual address is not in use by a virtual server. C(present) creates
the virtual address and enables it. If C(enabled), enable the virtual
address if it exists. If C(disabled), create the virtual address if
needed, and set state to C(disabled).
default: present
choices:
- present
- absent
- enabled
- disabled
advertise_route:
description:
- Specifies what routes of the virtual address the system advertises.
When C(when_any_available), advertises the route when any virtual
server is available. When C(when_all_available), advertises the
route when all virtual servers are available. When C(always), always
advertises the route regardless of the virtual servers available.
choices:
- always
- when_all_available
- when_any_available
use_route_advertisement:
description:
- Specifies whether the system uses route advertisement for this
virtual address. When disabled, the system does not advertise
routes for this virtual address.
choices:
- yes
- no
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires the netaddr Python package on the host. This is as easy as pip
install netaddr.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Add virtual address
bigip_virtual_address:
server: "lb.mydomain.net"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
address: "10.10.10.10"
delegate_to: localhost
- name: Enable route advertisement on the virtual address
bigip_virtual_address:
server: "lb.mydomain.net"
user: "admin"
password: "secret"
state: "present"
address: "10.10.10.10"
use_route_advertisement: yes
delegate_to: localhost
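# A sketch combining several documented options in one task (values are
# illustrative; the option names are the ones defined by this module):
- name: Disable ARP and limit connections on the virtual address
bigip_virtual_address:
server: "lb.mydomain.net"
user: "admin"
password: "secret"
address: "10.10.10.10"
arp_state: "disabled"
icmp_echo: "disabled"
connection_limit: 1000
delegate_to: localhost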
'''
RETURN = '''
use_route_advertisement:
description: The new setting for whether to use route advertising or not.
returned: changed
type: bool
sample: true
auto_delete:
description: New setting for auto deleting virtual address.
returned: changed
type: string
sample: enabled
icmp_echo:
description: New ICMP echo setting applied to virtual address.
returned: changed
type: string
sample: disabled
connection_limit:
description: The new connection limit of the virtual address.
returned: changed
type: int
sample: 1000
netmask:
description: The netmask of the virtual address.
returned: created
type: string
sample: 255.255.255.255
arp_state:
description: The new way the virtual address handles ARP requests.
returned: changed
type: string
sample: disabled
address:
description: The address of the virtual address.
returned: created
type: string
sample: 10.10.10.10
state:
description: The new state of the virtual address.
returned: changed
type: string
sample: disabled
'''
try:
import netaddr
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
class Parameters(AnsibleF5Parameters):
api_map = {
'routeAdvertisement': 'use_route_advertisement',
'autoDelete': 'auto_delete',
'icmpEcho': 'icmp_echo',
'connectionLimit': 'connection_limit',
'serverScope': 'advertise_route',
'mask': 'netmask',
'arp': 'arp_state'
}
updatables = [
'use_route_advertisement', 'auto_delete', 'icmp_echo', 'connection_limit',
'arp_state', 'enabled', 'advertise_route'
]
returnables = [
'use_route_advertisement', 'auto_delete', 'icmp_echo', 'connection_limit',
'netmask', 'arp_state', 'address', 'state'
]
api_attributes = [
'routeAdvertisement', 'autoDelete', 'icmpEcho', 'connectionLimit',
'advertiseRoute', 'arp', 'mask', 'enabled', 'serverScope'
]
@property
def advertise_route(self):
if self._values['advertise_route'] is None:
return None
elif self._values['advertise_route'] in ['any', 'when_any_available']:
return 'any'
elif self._values['advertise_route'] in ['all', 'when_all_available']:
return 'all'
elif self._values['advertise_route'] in ['none', 'always']:
return 'none'
@property
def connection_limit(self):
if self._values['connection_limit'] is None:
return None
return int(self._values['connection_limit'])
@property
def use_route_advertisement(self):
if self._values['use_route_advertisement'] is None:
return None
elif self._values['use_route_advertisement'] in BOOLEANS_TRUE:
return 'enabled'
elif self._values['use_route_advertisement'] == 'enabled':
return 'enabled'
else:
return 'disabled'
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return 'yes'
elif self._values['enabled'] in BOOLEANS_TRUE:
return 'yes'
elif self._values['state'] == 'disabled':
return 'no'
elif self._values['enabled'] in BOOLEANS_FALSE:
return 'no'
else:
return None
@property
def address(self):
if self._values['address'] is None:
return None
try:
ip = netaddr.IPAddress(self._values['address'])
return str(ip)
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The provided 'address' is not a valid IP address"
)
@property
def netmask(self):
if self._values['netmask'] is None:
return None
try:
ip = netaddr.IPAddress(self._values['netmask'])
return str(ip)
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The provided 'netmask' is not a valid IP address"
)
@property
def auto_delete(self):
if self._values['auto_delete'] is None:
return None
elif self._values['auto_delete'] in BOOLEANS_TRUE:
return True
elif self._values['auto_delete'] == 'enabled':
return True
else:
return False
@property
def state(self):
if self.enabled == 'yes' and self._values['state'] != 'present':
return 'enabled'
elif self.enabled == 'no':
return 'disabled'
else:
return self._values['state']
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if api_attribute in self.api_map:
result[api_attribute] = getattr(
self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def read_current_from_device(self):
resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
name=self.want.address,
partition=self.want.partition
)
result = resource.attrs
return Parameters(result)
def exists(self):
result = self.client.api.tm.ltm.virtual_address_s.virtual_address.exists(
name=self.want.address,
partition=self.want.partition
)
return result
def update(self):
self.have = self.read_current_from_device()
if self.want.netmask is not None:
if self.have.netmask != self.want.netmask:
raise F5ModuleError(
"The netmask cannot be changed. Delete and recreate"
"the virtual address if you need to do this."
)
if self.want.address is not None:
if self.have.address != self.want.address:
raise F5ModuleError(
"The address cannot be changed. Delete and recreate"
"the virtual address if you need to do this."
)
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
name=self.want.address,
partition=self.want.partition
)
resource.modify(**params)
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the virtual address")
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.virtual_address_s.virtual_address.create(
name=self.want.address,
partition=self.want.partition,
address=self.want.address,
**params
)
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the virtual address")
return True
def remove_from_device(self):
resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
name=self.want.address,
partition=self.want.partition
)
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
state=dict(
default='present',
choices=['present', 'absent', 'disabled', 'enabled']
),
address=dict(
type='str',
required=True,
aliases=['name']
),
netmask=dict(
type='str',
default='255.255.255.255',
),
connection_limit=dict(
type='int'
),
arp_state=dict(
choices=['enabled', 'disabled'],
),
auto_delete=dict(
choices=['enabled', 'disabled'],
),
icmp_echo=dict(
choices=['enabled', 'disabled', 'selective'],
),
advertise_route=dict(
choices=['always', 'when_all_available', 'when_any_available'],
),
use_route_advertisement=dict(
type='bool'
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| bsd-3-clause |
spektom/incubator-airflow | airflow/contrib/operators/sagemaker_endpoint_operator.py | 5 | 1207 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.operators.sagemaker_endpoint`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.operators.sagemaker_endpoint import SageMakerEndpointOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.sagemaker_endpoint`.",
DeprecationWarning, stacklevel=2
)
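# A hedged migration sketch, not part of the original shim: new DAG code
# should import the operator from the provider package directly, e.g.
#
#   from airflow.providers.amazon.aws.operators.sagemaker_endpoint import (
#       SageMakerEndpointOperator,
#   )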
| apache-2.0 |
glenntanner3/devedeng | src/devedeng/interface_manager.py | 4 | 25492 | # Copyright 2014 (C) Raster Software Vigo (Sergio Costas)
#
# This file is part of DeVeDe-NG
#
# DeVeDe-NG is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# DeVeDe-NG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from gi.repository import GObject,Gdk
class interface_manager(GObject.GObject):
""" This class allows to automatically generate variables for a GLADE interface,
set the widgets in the interface to their values, and copy the current values
in the widgets to the variables """
def __init__(self):
GObject.GObject.__init__(self)
self.interface_groups = {}
self.interface_toggles = []
self.interface_dualtoggles = []
self.interface_labels = []
self.interface_text = []
self.interface_show_hide = []
self.interface_enable_disable = []
self.interface_float_adjustments = []
self.interface_integer_adjustments = []
self.interface_lists = []
self.interface_colorbuttons = []
self.interface_fontbuttons = []
self.interface_filebuttons = []
self.interface_comboboxes = []
def add_group(self,group_name,radiobutton_list,default_value,callback = None):
""" Adds a group of radiobuttons and creates an internal variable with
the name group_name, setting it to default_value. The
value for the variable will be the name of the active
radiobutton """
if (default_value != None):
exec('self.'+group_name+' = "'+str(default_value)+'"')
else:
exec('self.'+group_name+' = None')
self.interface_groups[group_name] = ( radiobutton_list, callback )
def add_toggle(self,toggle_name,default_value,callback = None):
""" Adds an internal variable with the name toggle_name, linked to a widget
element with the same name (must be or inherint from Gtk.ToogleButton).
The default value can be True of False """
exec('self.'+toggle_name+' = '+str(default_value))
self.interface_toggles.append( (toggle_name, callback) )
def add_dualtoggle(self,toggle_name,toggle2,default_value,callback = None):
""" Adds an internal variable with the name toggle_name, linked to widget
elements with names toggle_nane and toggle2 (must be or inherint from Gtk.ToogleButton).
The default value can be True of False, with True being toggle_name active, and False
being toggle2 active """
exec('self.'+toggle_name+' = '+str(default_value))
self.interface_dualtoggles.append( (toggle_name, toggle2, callback) )
def add_text(self,text_name,default_value,callback = None):
""" Adds an internal variable with the name text_name, linked to an
element with the same name (must be a Gtk.Entry or a Gtk.Label).
The default value can be a text or None """
if (default_value != None):
exec('self.'+text_name+' = "'+str(default_value).replace('\"','\\"')+'"')
else:
exec('self.'+text_name+' = None')
self.interface_text.append( (text_name, callback) )
def add_label(self,text_name,default_value):
""" Adds an internal variable with the name text_name, linked to an
element with the same name (must be a Gtk.TextEntry or a Gtk.Label).
The default value can be a text or None. This element is copied to the UI,
but is never updated from the UI if the user changes it """
exec('self.'+text_name+' = default_value')
self.interface_labels.append(text_name)
def add_integer_adjustment(self,adjustment_name,default_value,callback = None):
""" Adds an internal variable with the name text_name, linked to an
element with the same name (must be a Gtk.Adjustment).
The default value must be an integer """
exec('self.'+adjustment_name+' = '+str(default_value))
self.interface_integer_adjustments.append( (adjustment_name, callback) )
def add_float_adjustment(self,adjustment_name,default_value,callback = None):
""" Adds an internal variable with the name text_name, linked to an
element with the same name (must be a Gtk.Adjustment).
The default value must be an float """
exec('self.'+adjustment_name+' = '+str(default_value))
self.interface_float_adjustments.append( (adjustment_name, callback))
def add_list(self,liststore_name,callback = None):
""" Adds an internal variable with the name liststore_name, linked to
an element with the same name (must be a Gtk.ListStore). """
exec('self.'+liststore_name+' = []')
self.interface_lists.append( (liststore_name, callback ))
def add_colorbutton(self,colorbutton_name, default_value,callback = None):
""" Adds an internal variable with the name colorbutton_name, linked to an
element with the same name (must be a Gtk.ColorButton).
The default value must be a tuple with RGBA values """
exec('self.'+colorbutton_name+' = default_value')
self.interface_colorbuttons.append( (colorbutton_name, callback ))
def add_fontbutton(self,fontbutton_name, default_value, callback = None):
""" Adds an internal variable with the name fontbutton_name, linked to an
element with the same name (must be a Gtk.FontButton).
The default value must be a string with the font values """
exec('self.'+fontbutton_name+' = default_value')
self.interface_fontbuttons.append( (fontbutton_name, callback ))
def add_filebutton(self,filebutton_name, default_value, callback = None):
""" Adds an internal variable with the name filebutton_name, linked to an
element with the same name (must be a Gtk.FileChooserButton).
The default value must be a string with the file path """
exec('self.'+filebutton_name+' = default_value')
self.interface_filebuttons.append( (filebutton_name, callback ) )
def add_combobox(self,combobox_name,values,default_value,callback = None):
""" Adds an internal variable with the name combobox_name, linked to an
element with the same name (must be a Gtk.ComboBox).
The default value must be one of the entries in values (the entry to select) """
exec('self.'+combobox_name+' = default_value')
self.interface_comboboxes.append ( (combobox_name, values, callback) )
def add_show_hide(self,element_name,to_show,to_hide):
""" Adds an element that can be active or inactive, and two lists of elements.
The first one contains elements that will be visible when the element is
active, and invisible when it is inactive, and the second one contains
elements that will be visible when the element is inactive, and
invisible when the element is active """
self.interface_show_hide.append([element_name, to_show, to_hide])
def add_enable_disable(self,element_name,to_enable,to_disable):
""" Adds an element that can be active or inactive, and two lists of elements.
The first one contains elements that will be enabled when the element is
active, and disabled when it is inactive, and the second one contains
elements that will be enabled when the element is inactive, and
disabled when the element is active """
self.interface_enable_disable.append([element_name, to_enable, to_disable])
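# A hedged usage sketch, not part of the original class: a dialog class
# inheriting from interface_manager would typically declare its variables
# and then sync them with the Glade builder like this (widget names are
# illustrative).
#
#   self.add_toggle('use_subtitles', False)
#   self.add_integer_adjustment('volume', 100)
#   self.update_ui(builder)   # push the variables into the widgets
#   ...                       # the user edits the dialog
#   self.store_ui(builder)    # pull the widget values back into self.*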
def update_ui(self,builder):
""" Sets the value of the widgets in base of the internal variables """
for key in self.interface_groups:
obj = eval('self.'+key)
builder.get_object(obj).set_active(True)
callback = self.interface_groups[key][1]
if (callback != None):
for element in self.interface_groups[key][0]:
obj = builder.get_object(element)
obj.connect("toggled",callback)
for element in self.interface_toggles:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
obj.set_active(value)
callback = element[1]
if (callback != None):
obj.connect("toggled",callback)
for element in self.interface_dualtoggles:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
obj2 = builder.get_object(element[1])
if value:
obj.set_active(True)
else:
obj2.set_active(True)
callback = element[2]
if (callback != None):
obj.connect("toggled",callback)
for element in self.interface_text:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
if (value != None):
obj.set_text(value)
else:
obj.set_text("")
callback = element[1]
if (callback != None):
obj.connect("changed",callback)
for element in self.interface_labels:
value = eval('self.'+element)
obj = builder.get_object(element)
if obj != None:
if (value != None):
obj.set_text(str(value))
else:
obj.set_text("")
for element in self.interface_integer_adjustments:
obj = builder.get_object(element[0])
if obj != None:
value = eval('self.'+element[0])
obj.set_value(float(value))
callback = element[1]
if (callback != None):
obj.connect("value_changed",callback)
for element in self.interface_float_adjustments:
obj = builder.get_object(element[0])
if obj != None:
value = eval('self.'+element[0])
obj.set_value(value)
callback = element[1]
if (callback != None):
obj.connect("value_changed",callback)
for element in self.interface_lists:
obj = eval('self.'+element[0])
the_liststore = builder.get_object(element[0])
the_liststore.clear()
for item in obj:
the_liststore.append(item)
callback = element[1]
if (callback != None):
the_liststore.connect("row_changed",callback)
the_liststore.connect("row_deleted",callback)
the_liststore.connect("row_inserted",callback)
the_liststore.connect("row_reordered",callback)
for element in self.interface_colorbuttons:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
objcolor = Gdk.Color(int(value[0]*65535.0),int(value[1]*65535.0),int(value[2]*65535.0))
obj.set_color(objcolor)
obj.set_alpha(int(value[3]*65535.0))
callback = element[1]
if (callback != None):
obj.connect("color_set",callback)
for element in self.interface_fontbuttons:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
if (value != None):
obj.set_font(value)
callback = element[1]
if (callback != None):
obj.connect("font_set",callback)
for element in self.interface_filebuttons:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
if (value != None):
obj.set_filename(value)
callback = element[1]
if (callback != None):
obj.connect("file_set",callback)
for element in self.interface_comboboxes:
obj = eval('self.'+element[0])
the_combo = builder.get_object(element[0])
the_list = the_combo.get_model()
the_list.clear()
counter = 0
dv = 0
for item in element[1]:
the_list.append([item])
if (item == obj):
dv = counter
counter += 1
the_combo.set_active(dv)
callback = element[2]
if (callback != None):
the_combo.connect("changed",callback)
self.interface_show_hide_obj = {}
for element in self.interface_show_hide:
obj = builder.get_object(element[0])
to_show = []
for e2 in element[1]:
to_show.append(builder.get_object(e2))
to_hide = []
for e3 in element[2]:
to_hide.append(builder.get_object(e3))
self.interface_show_hide_obj[obj] = [to_show, to_hide]
obj.connect('toggled',self.toggled_element)
self.toggled_element(obj)
self.interface_enable_disable_obj = {}
for element in self.interface_enable_disable:
obj = builder.get_object(element[0])
to_enable = []
for e2 in element[1]:
to_enable.append(builder.get_object(e2))
to_disable = []
for e3 in element[2]:
to_disable.append(builder.get_object(e3))
self.interface_enable_disable_obj[obj] = [to_enable, to_disable]
obj.connect('toggled',self.toggled_element2)
self.toggled_element2(obj)
def toggled_element(self,element):
""" Wenever an element with 'hide' or 'show' needs is toggled, this callback is called """
# First, show all items for each possible element
for key in self.interface_show_hide_obj:
to_show = self.interface_show_hide_obj[key][0]
to_hide = self.interface_show_hide_obj[key][1]
active = key.get_active()
for item in to_show:
if active:
item.show()
for item in to_hide:
if not active:
item.show()
# And now, hide all items that must be hidden.
# This is done this way because it allows an item to be hidden by
# one widget and shown by another: in that case, it will always be hidden
for key in self.interface_show_hide_obj:
to_show = self.interface_show_hide_obj[key][0]
to_hide = self.interface_show_hide_obj[key][1]
active = key.get_active()
for item in to_show:
if not active:
item.hide()
for item in to_hide:
if active:
item.hide()
def toggled_element2(self,element):
""" Wenever an element with 'enable' or 'disable' needs is toggled, this callback is called """
# First enable all items that must be enabled
for key in self.interface_enable_disable_obj:
to_enable = self.interface_enable_disable_obj[key][0]
to_disable = self.interface_enable_disable_obj[key][1]
active = key.get_active()
if (active):
for item in to_enable:
item.set_sensitive(True)
else:
for item in to_disable:
item.set_sensitive(True)
# And now, disable all items that must be disabled.
# This is done this way because it allows an item to be disabled by
# one widget and enabled by another: in that case, it will always be disabled
for key in self.interface_enable_disable_obj:
to_enable = self.interface_enable_disable_obj[key][0]
to_disable = self.interface_enable_disable_obj[key][1]
active = key.get_active()
if (not active):
for item in to_enable:
item.set_sensitive(False)
else:
for item in to_disable:
item.set_sensitive(False)
    def store_ui(self,builder):
        """ Takes the values of the widgets and stores them in the internal variables """
        for key in self.interface_groups:
            for element in self.interface_groups[key][0]:
                obj = builder.get_object(element)
                if obj.get_active():
                    setattr(self, key, element)
                    break
        for element in self.interface_toggles:
            obj = builder.get_object(element[0])
            setattr(self, element[0], obj.get_active())
        for element in self.interface_dualtoggles:
            obj = builder.get_object(element[0])
            setattr(self, element[0], obj.get_active())
        for element in self.interface_text:
            obj = builder.get_object(element[0])
            setattr(self, element[0], obj.get_text())
        for element in self.interface_integer_adjustments:
            obj = builder.get_object(element[0])
            if obj is not None:
                setattr(self, element[0], int(obj.get_value()))
        for element in self.interface_float_adjustments:
            obj = builder.get_object(element[0])
            if obj is not None:
                setattr(self, element[0], obj.get_value())
        for element in self.interface_colorbuttons:
            obj = builder.get_object(element[0])
            objcolor = obj.get_color()
            alpha = obj.get_alpha()
            setattr(self, element[0], (float(objcolor.red) / 65535.0,
                                       float(objcolor.green) / 65535.0,
                                       float(objcolor.blue) / 65535.0,
                                       float(alpha) / 65535.0))
        for element in self.interface_fontbuttons:
            obj = builder.get_object(element[0])
            setattr(self, element[0], obj.get_font())
        for element in self.interface_filebuttons:
            obj = builder.get_object(element[0])
            setattr(self, element[0], obj.get_filename())
        for element in self.interface_lists:
            rows = []
            the_liststore = builder.get_object(element[0])
            ncolumns = the_liststore.get_n_columns()
            for row in the_liststore:
                final_row = []
                for c in range(0, ncolumns):
                    final_row.append(row.model[row.iter][c])
                rows.append(final_row)
            setattr(self, element[0], rows)
        for element in self.interface_comboboxes:
            obj = builder.get_object(element[0])
            setattr(self, element[0], element[1][obj.get_active()])
    def save_ui(self):
        """ Makes a copy of all the UI variables """
        for element in self.interface_groups:
            setattr(self, element + '_backup', getattr(self, element))
        for element in self.interface_toggles:
            setattr(self, element[0] + '_backup', getattr(self, element[0]))
        for element in self.interface_dualtoggles:
            setattr(self, element[0] + '_backup', getattr(self, element[0]))
        for element in self.interface_text:
            setattr(self, element[0] + '_backup', getattr(self, element[0]))
        for element in self.interface_integer_adjustments:
            setattr(self, element[0] + '_backup', getattr(self, element[0]))
        for element in self.interface_float_adjustments:
            setattr(self, element[0] + '_backup', getattr(self, element[0]))
        for element in self.interface_colorbuttons:
            setattr(self, element[0] + '_backup', getattr(self, element[0]))
        for element in self.interface_fontbuttons:
            setattr(self, element[0] + '_backup', getattr(self, element[0]))
        for element in self.interface_filebuttons:
            setattr(self, element[0] + '_backup', getattr(self, element[0]))
        for element in self.interface_lists:
            setattr(self, element[0] + '_backup', getattr(self, element[0]))
        for element in self.interface_comboboxes:
            setattr(self, element[0] + '_backup', getattr(self, element[0]))
    def restore_ui(self):
        """ Restores a copy of all the UI variables """
        for element in self.interface_groups:
            setattr(self, element, getattr(self, element + '_backup'))
        for element in self.interface_toggles:
            setattr(self, element[0], getattr(self, element[0] + '_backup'))
        for element in self.interface_dualtoggles:
            setattr(self, element[0], getattr(self, element[0] + '_backup'))
        for element in self.interface_text:
            setattr(self, element[0], getattr(self, element[0] + '_backup'))
        for element in self.interface_integer_adjustments:
            setattr(self, element[0], getattr(self, element[0] + '_backup'))
        for element in self.interface_float_adjustments:
            setattr(self, element[0], getattr(self, element[0] + '_backup'))
        for element in self.interface_colorbuttons:
            setattr(self, element[0], getattr(self, element[0] + '_backup'))
        for element in self.interface_fontbuttons:
            setattr(self, element[0], getattr(self, element[0] + '_backup'))
        for element in self.interface_filebuttons:
            setattr(self, element[0], getattr(self, element[0] + '_backup'))
        for element in self.interface_lists:
            setattr(self, element[0], getattr(self, element[0] + '_backup'))
        for element in self.interface_comboboxes:
            setattr(self, element[0], getattr(self, element[0] + '_backup'))
    def serialize(self):
        """ Returns a dictionary with both the variables of the interface and their values,
        which can be restored with unserialize
        """
        output = {}
        for element in self.interface_groups:
            output[element] = getattr(self, element)
        for element in self.interface_toggles:
            output[element[0]] = getattr(self, element[0])
        for element in self.interface_dualtoggles:
            output[element[0]] = getattr(self, element[0])
        for element in self.interface_text:
            output[element[0]] = getattr(self, element[0])
        for element in self.interface_integer_adjustments:
            output[element[0]] = getattr(self, element[0])
        for element in self.interface_float_adjustments:
            output[element[0]] = getattr(self, element[0])
        for element in self.interface_colorbuttons:
            output[element[0]] = getattr(self, element[0])
        for element in self.interface_fontbuttons:
            output[element[0]] = getattr(self, element[0])
        for element in self.interface_filebuttons:
            output[element[0]] = getattr(self, element[0])
        for element in self.interface_lists:
            output[element[0]] = getattr(self, element[0])
        for element in self.interface_comboboxes:
            output[element[0]] = getattr(self, element[0])
        return output
    def unserialize(self,data_list):
        """ Takes a dictionary with the variables of the interface and their values,
        and restores them into their variables
        """
        for element in self.interface_groups:
            if element in data_list:
                setattr(self, element, data_list[element])
        for element in self.interface_toggles:
            if element[0] in data_list:
                setattr(self, element[0], data_list[element[0]])
        for element in self.interface_dualtoggles:
            if element[0] in data_list:
                setattr(self, element[0], data_list[element[0]])
        for element in self.interface_text:
            if element[0] in data_list:
                setattr(self, element[0], data_list[element[0]])
        for element in self.interface_integer_adjustments:
            if element[0] in data_list:
                setattr(self, element[0], data_list[element[0]])
        for element in self.interface_float_adjustments:
            if element[0] in data_list:
                setattr(self, element[0], data_list[element[0]])
        for element in self.interface_colorbuttons:
            if element[0] in data_list:
                setattr(self, element[0], data_list[element[0]])
        for element in self.interface_fontbuttons:
            if element[0] in data_list:
                setattr(self, element[0], data_list[element[0]])
        for element in self.interface_filebuttons:
            if element[0] in data_list:
                setattr(self, element[0], data_list[element[0]])
        for element in self.interface_lists:
            if element[0] in data_list:
                setattr(self, element[0], data_list[element[0]])
        for element in self.interface_comboboxes:
            if element[0] in data_list:
                setattr(self, element[0], data_list[element[0]])
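# Illustrative round-trip sketch (assumption: `ui` is an instance of the
# class above and `builder` is a Gtk.Builder holding the matching widgets):
#
#   ui.store_ui(builder)     # widget state -> instance attributes
#   state = ui.serialize()   # attributes   -> plain dict (e.g. for saving)
#   ui.unserialize(state)    # dict         -> attributes
#   ui.save_ui()             # snapshot attributes into *_backup copies
#   ui.restore_ui()          # roll attributes back to the snapshot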
| gpl-3.0 |
jgeskens/django | django/db/models/loading.py | 8 | 10624 | "Utilities for loading models and the modules that contain them."
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from django.utils._os import upath
from django.utils import six
import imp
import sys
import os
__all__ = ('get_apps', 'get_app', 'get_models', 'get_model', 'register_models',
'load_app', 'app_cache_ready')
class AppCache(object):
"""
A cache that stores installed applications and their models. Used to
provide reverse-relations and for app introspection (e.g. admin).
"""
# Use the Borg pattern to share state between all instances. Details at
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531.
__shared_state = dict(
# Keys of app_store are the model modules for each application.
app_store=SortedDict(),
# Mapping of installed app_labels to model modules for that app.
app_labels={},
# Mapping of app_labels to a dictionary of model names to model code.
# May contain apps that are not installed.
app_models=SortedDict(),
# Mapping of app_labels to errors raised when trying to import the app.
app_errors={},
# -- Everything below here is only used when populating the cache --
loaded=False,
handled={},
postponed=[],
nesting_level=0,
_get_models_cache={},
)
def __init__(self):
self.__dict__ = self.__shared_state
def _populate(self):
"""
Fill in all the cache information. This method is threadsafe, in the
sense that every caller will see the same state upon return, and if the
cache is already initialised, it does no work.
"""
if self.loaded:
return
# Note that we want to use the import lock here - the app loading is
# in many cases initiated implicitly by importing, and thus it is
# possible to end up in deadlock when one thread initiates loading
# without holding the importer lock and another thread then tries to
# import something which also launches the app loading. For details of
# this situation see #18251.
imp.acquire_lock()
try:
if self.loaded:
return
for app_name in settings.INSTALLED_APPS:
if app_name in self.handled:
continue
self.load_app(app_name, True)
if not self.nesting_level:
for app_name in self.postponed:
self.load_app(app_name)
self.loaded = True
finally:
imp.release_lock()
def _label_for(self, app_mod):
"""
Return app_label for given models module.
"""
return app_mod.__name__.split('.')[-2]
def load_app(self, app_name, can_postpone=False):
"""
Loads the app with the provided fully qualified name, and returns the
model module.
"""
self.handled[app_name] = None
self.nesting_level += 1
app_module = import_module(app_name)
try:
models = import_module('.models', app_name)
except ImportError:
self.nesting_level -= 1
# If the app doesn't have a models module, we can just ignore the
# ImportError and return no models for it.
if not module_has_submodule(app_module, 'models'):
return None
# But if the app does have a models module, we need to figure out
# whether to suppress or propagate the error. If can_postpone is
# True then it may be that the package is still being imported by
# Python and the models module isn't available yet. So we add the
# app to the postponed list and we'll try it again after all the
# recursion has finished (in populate). If can_postpone is False
# then it's time to raise the ImportError.
else:
if can_postpone:
self.postponed.append(app_name)
return None
else:
raise
self.nesting_level -= 1
if models not in self.app_store:
self.app_store[models] = len(self.app_store)
self.app_labels[self._label_for(models)] = models
return models
def app_cache_ready(self):
"""
Returns true if the model cache is fully populated.
        Useful for code that wants to cache the results of get_models()
        once it is safe to do so.
"""
return self.loaded
def get_apps(self):
"Returns a list of all installed modules that contain models."
self._populate()
# Ensure the returned list is always in the same order (with new apps
# added at the end). This avoids unstable ordering on the admin app
# list page, for example.
apps = [(v, k) for k, v in self.app_store.items()]
apps.sort()
return [elt[1] for elt in apps]
def get_app(self, app_label, emptyOK=False):
"""
Returns the module containing the models for the given app_label. If
the app has no models in it and 'emptyOK' is True, returns None.
"""
self._populate()
imp.acquire_lock()
try:
for app_name in settings.INSTALLED_APPS:
if app_label == app_name.split('.')[-1]:
mod = self.load_app(app_name, False)
if mod is None:
if emptyOK:
return None
raise ImproperlyConfigured("App with label %s is missing a models.py module." % app_label)
else:
return mod
raise ImproperlyConfigured("App with label %s could not be found" % app_label)
finally:
imp.release_lock()
def get_app_errors(self):
"Returns the map of known problems with the INSTALLED_APPS."
self._populate()
return self.app_errors
def get_models(self, app_mod=None,
include_auto_created=False, include_deferred=False,
only_installed=True, include_swapped=False):
"""
Given a module containing models, returns a list of the models.
Otherwise returns a list of all installed models.
By default, auto-created models (i.e., m2m models without an
explicit intermediate table) are not included. However, if you
specify include_auto_created=True, they will be.
By default, models created to satisfy deferred attribute
queries are *not* included in the list of models. However, if
you specify include_deferred, they will be.
By default, models that aren't part of installed apps will *not*
be included in the list of models. However, if you specify
only_installed=False, they will be.
By default, models that have been swapped out will *not* be
included in the list of models. However, if you specify
include_swapped, they will be.
"""
cache_key = (app_mod, include_auto_created, include_deferred, only_installed, include_swapped)
try:
return self._get_models_cache[cache_key]
except KeyError:
pass
self._populate()
if app_mod:
if app_mod in self.app_store:
app_list = [self.app_models.get(self._label_for(app_mod),
SortedDict())]
else:
app_list = []
else:
if only_installed:
app_list = [self.app_models.get(app_label, SortedDict())
for app_label in six.iterkeys(self.app_labels)]
else:
app_list = six.itervalues(self.app_models)
model_list = []
for app in app_list:
model_list.extend(
model for model in app.values()
if ((not model._deferred or include_deferred) and
(not model._meta.auto_created or include_auto_created) and
(not model._meta.swapped or include_swapped))
)
self._get_models_cache[cache_key] = model_list
return model_list
def get_model(self, app_label, model_name,
seed_cache=True, only_installed=True):
"""
Returns the model matching the given app_label and case-insensitive
model_name.
Returns None if no model is found.
"""
if seed_cache:
self._populate()
if only_installed and app_label not in self.app_labels:
return None
return self.app_models.get(app_label, SortedDict()).get(model_name.lower())
def register_models(self, app_label, *models):
"""
Register a set of models as belonging to an app.
"""
for model in models:
# Store as 'name: model' pair in a dictionary
# in the app_models dictionary
model_name = model._meta.model_name
model_dict = self.app_models.setdefault(app_label, SortedDict())
if model_name in model_dict:
# The same model may be imported via different paths (e.g.
# appname.models and project.appname.models). We use the source
# filename as a means to detect identity.
fname1 = os.path.abspath(upath(sys.modules[model.__module__].__file__))
fname2 = os.path.abspath(upath(sys.modules[model_dict[model_name].__module__].__file__))
# Since the filename extension could be .py the first time and
# .pyc or .pyo the second time, ignore the extension when
# comparing.
if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:
continue
model_dict[model_name] = model
self._get_models_cache.clear()
cache = AppCache()
# These methods were always module level, so are kept that way for backwards
# compatibility.
get_apps = cache.get_apps
get_app = cache.get_app
get_app_errors = cache.get_app_errors
get_models = cache.get_models
get_model = cache.get_model
register_models = cache.register_models
load_app = cache.load_app
app_cache_ready = cache.app_cache_ready
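# Illustrative sketch only (not part of Django): typical use of the
# module-level API. 'books' and 'author' are hypothetical app/model names,
# and settings must already be configured before calling these.
def _example_usage():
    author_model = get_model('books', 'author')  # model_name is case-insensitive
    installed_models = get_models()              # every model in INSTALLED_APPS
    return author_model, installed_models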
| bsd-3-clause |
OptimusGitEtna/RestSymf | Python-3.4.2/Lib/_collections_abc.py | 68 | 19967 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
# This module has been renamed from collections.abc to _collections_abc to
# speed up interpreter startup. Some of the types such as MutableMapping are
# required early but collections module imports a lot of other modules.
# See issue #19218
__name__ = "collections.abc"
# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
# it = iter(somebytearray)
# assert isinstance(it, Iterable)
# Note: in other implementations, these types may not be distinct
# and they may have their own implementation-specific types that
# are not included in this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
mappingproxy = type(type.__dict__)
### ONE-TRICK PONIES ###
class Hashable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
for B in C.__mro__:
if "__hash__" in B.__dict__:
if B.__dict__["__hash__"]:
return True
break
return NotImplemented
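# The hook above makes any class that defines __hash__ count as a virtual
# subclass without explicit registration, e.g. issubclass(int, Hashable) is True.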
class Iterable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
if any("__iter__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Iterator(Iterable):
__slots__ = ()
@abstractmethod
def __next__(self):
'Return the next item from the iterator. When exhausted, raise StopIteration'
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
if (any("__next__" in B.__dict__ for B in C.__mro__) and
any("__iter__" in B.__dict__ for B in C.__mro__)):
return True
return NotImplemented
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
class Sized(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
if any("__len__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Container(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
if any("__contains__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Callable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
if any("__call__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), redefine __le__ and __ge__,
then the other operations will automatically follow suit.
"""
__slots__ = ()
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) > len(other) and self.__ge__(other)
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) < len(other):
return False
for elem in other:
if elem not in self:
return False
return True
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
__rand__ = __and__
def isdisjoint(self, other):
'Return True if two sets have a null intersection.'
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
__ror__ = __or__
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __rsub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in other
if value not in self)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
__rxor__ = __xor__
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
        This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxsize
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
Set.register(frozenset)
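# Illustrative sketch (not part of this module): a concrete Set only has to
# supply __contains__, __iter__ and __len__; the comparisons and set
# operators are inherited from the mixin methods above.
class _ListBackedSet(Set):
    """Example set trading speed for not requiring hashable elements."""
    def __init__(self, iterable):
        self.elements = elements = []
        for value in iterable:
            if value not in elements:
                elements.append(value)
    def __iter__(self):
        return iter(self.elements)
    def __contains__(self, value):
        return value in self.elements
    def __len__(self):
        return len(self.elements)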
class MutableSet(Set):
"""A mutable set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__, __len__,
add(), and discard().
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
__slots__ = ()
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
__slots__ = ()
"""A Mapping is a generic container for associating key/value
pairs.
This class provides concrete generic implementations of all
methods except for __getitem__, __iter__, and __len__.
"""
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def keys(self):
"D.keys() -> a set-like object providing a view on D's keys"
return KeysView(self)
def items(self):
"D.items() -> a set-like object providing a view on D's items"
return ItemsView(self)
def values(self):
"D.values() -> an object providing a view on D's values"
return ValuesView(self)
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
def __ne__(self, other):
return not (self == other)
Mapping.register(mappingproxy)
class MappingView(Sized):
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
@classmethod
    def _from_iterable(cls, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
yield from self._mapping
KeysView.register(dict_keys)
class ItemsView(MappingView, Set):
@classmethod
    def _from_iterable(cls, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
ItemsView.register(dict_items)
class ValuesView(MappingView):
def __contains__(self, value):
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
ValuesView.register(dict_values)
class MutableMapping(Mapping):
__slots__ = ()
"""A MutableMapping is a generic container for associating
key/value pairs.
This class provides concrete generic implementations of all
methods except for __getitem__, __setitem__, __delitem__,
__iter__, and __len__.
"""
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
'''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
'''D.popitem() -> (k, v), remove and return some (key, value) pair
as a 2-tuple; but raise KeyError if D is empty.
'''
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
'D.clear() -> None. Remove all items from D.'
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
If E present and has a .keys() method, does: for k in E: D[k] = E[k]
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
In either case, this is followed by: for k, v in F.items(): D[k] = v
'''
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
other = args[1] if len(args) >= 2 else ()
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
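# Illustrative sketch (not part of this module): a minimal MutableMapping
# needs only the five abstract methods; get/pop/setdefault/update and the
# views come for free from the mixins above.
class _DictBackedMapping(MutableMapping):
    def __init__(self, *args, **kwargs):
        self._data = dict(*args, **kwargs)
    def __getitem__(self, key):
        return self._data[key]
    def __setitem__(self, key, value):
        self._data[key] = value
    def __delitem__(self, key):
        del self._data[key]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)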
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
__slots__ = ()
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value):
'''S.index(value) -> integer -- return first index of value.
Raises ValueError if the value is not present.
'''
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
'S.count(value) -> integer -- return number of occurrences of value'
return sum(1 for v in self if v == value)
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
Sequence.register(memoryview)
class ByteString(Sequence):
"""This unifies bytes and bytearray.
XXX Should add all their methods.
"""
__slots__ = ()
ByteString.register(bytes)
ByteString.register(bytearray)
class MutableSequence(Sequence):
__slots__ = ()
"""All the operations on a read-write sequence.
Concrete subclasses must provide __new__ or __init__,
__getitem__, __setitem__, __delitem__, __len__, and insert().
"""
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
'S.insert(index, value) -- insert value before index'
raise IndexError
def append(self, value):
'S.append(value) -- append value to the end of the sequence'
self.insert(len(self), value)
def clear(self):
'S.clear() -> None -- remove all items from S'
try:
while True:
self.pop()
except IndexError:
pass
def reverse(self):
'S.reverse() -- reverse *IN PLACE*'
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
'S.extend(iterable) -- extend sequence by appending elements from the iterable'
for v in values:
self.append(v)
def pop(self, index=-1):
'''S.pop([index]) -> item -- remove and return item at index (default last).
Raise IndexError if list is empty or index is out of range.
'''
v = self[index]
del self[index]
return v
def remove(self, value):
'''S.remove(value) -- remove first occurrence of value.
Raise ValueError if the value is not present.
'''
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
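# Illustrative sketch (not part of this module): implementing the five
# abstract methods yields append/extend/pop/remove/__iadd__ for free.
class _ListBackedSequence(MutableSequence):
    def __init__(self):
        self._items = []
    def __getitem__(self, index):
        return self._items[index]
    def __setitem__(self, index, value):
        self._items[index] = value
    def __delitem__(self, index):
        del self._items[index]
    def __len__(self):
        return len(self._items)
    def insert(self, index, value):
        self._items.insert(index, value)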
| mit |
dahlstrom-g/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/btm_matcher.py | 326 | 6834 | """A bottom-up tree matching algorithm implementation meant to speed
up 2to3's matching process. After the tree patterns are reduced to
their rarest linear path, a linear Aho-Corasick automaton is
created. The linear automaton traverses the linear paths from the
leaves to the root of the AST and returns a set of nodes for further
matching. This reduces significantly the number of candidate nodes."""
__author__ = "George Boutsioukis <[email protected]>"
import logging
import itertools
from collections import defaultdict
from . import pytree
from .btm_utils import reduce_tree
class BMNode(object):
"""Class for a node of the Aho-Corasick automaton used in matching"""
count = itertools.count()
def __init__(self):
self.transition_table = {}
self.fixers = []
self.id = next(BMNode.count)
self.content = ''
class BottomMatcher(object):
"""The main matcher class. After instantiating the patterns should
be added using the add_fixer method"""
def __init__(self):
self.match = set()
self.root = BMNode()
self.nodes = [self.root]
self.fixers = []
self.logger = logging.getLogger("RefactoringTool")
def add_fixer(self, fixer):
"""Reduces a fixer's pattern tree to a linear path and adds it
to the matcher(a common Aho-Corasick automaton). The fixer is
appended on the matching states and called when they are
reached"""
self.fixers.append(fixer)
tree = reduce_tree(fixer.pattern_tree)
linear = tree.get_linear_subpattern()
match_nodes = self.add(linear, start=self.root)
for match_node in match_nodes:
match_node.fixers.append(fixer)
def add(self, pattern, start):
"Recursively adds a linear pattern to the AC automaton"
#print("adding pattern", pattern, "to", start)
if not pattern:
#print("empty pattern")
return [start]
if isinstance(pattern[0], tuple):
#alternatives
#print("alternatives")
match_nodes = []
for alternative in pattern[0]:
#add all alternatives, and add the rest of the pattern
#to each end node
end_nodes = self.add(alternative, start=start)
for end in end_nodes:
match_nodes.extend(self.add(pattern[1:], end))
return match_nodes
else:
#single token
#not last
if pattern[0] not in start.transition_table:
#transition did not exist, create new
next_node = BMNode()
start.transition_table[pattern[0]] = next_node
else:
#transition exists already, follow
next_node = start.transition_table[pattern[0]]
if pattern[1:]:
end_nodes = self.add(pattern[1:], start=next_node)
else:
end_nodes = [next_node]
return end_nodes
def run(self, leaves):
"""The main interface with the bottom matcher. The tree is
traversed from the bottom using the constructed
automaton. Nodes are only checked once as the tree is
retraversed. When the automaton fails, we give it one more
shot(in case the above tree matches as a whole with the
rejected leaf), then we break for the next leaf. There is the
special case of multiple arguments(see code comments) where we
recheck the nodes
Args:
The leaves of the AST tree to be matched
Returns:
A dictionary of node matches with fixers as the keys
"""
current_ac_node = self.root
results = defaultdict(list)
for leaf in leaves:
current_ast_node = leaf
while current_ast_node:
current_ast_node.was_checked = True
for child in current_ast_node.children:
# multiple statements, recheck
if isinstance(child, pytree.Leaf) and child.value == u";":
current_ast_node.was_checked = False
break
if current_ast_node.type == 1:
#name
node_token = current_ast_node.value
else:
node_token = current_ast_node.type
if node_token in current_ac_node.transition_table:
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
                        if fixer not in results:
results[fixer] = []
results[fixer].append(current_ast_node)
else:
#matching failed, reset automaton
current_ac_node = self.root
if (current_ast_node.parent is not None
and current_ast_node.parent.was_checked):
#the rest of the tree upwards has been checked, next leaf
break
#recheck the rejected node once from the root
if node_token in current_ac_node.transition_table:
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
                        if fixer not in results:
results[fixer] = []
results[fixer].append(current_ast_node)
current_ast_node = current_ast_node.parent
return results
def print_ac(self):
"Prints a graphviz diagram of the BM automaton(for debugging)"
print("digraph g{")
def print_node(node):
for subnode_key in node.transition_table.keys():
subnode = node.transition_table[subnode_key]
print("%d -> %d [label=%s] //%s" %
(node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers)))
if subnode_key == 1:
print(subnode.content)
print_node(subnode)
print_node(self.root)
print("}")
# taken from pytree.py for debugging; only used by print_ac
_type_reprs = {}
def type_repr(type_num):
global _type_reprs
if not _type_reprs:
from .pygram import python_symbols
# printing tokens is possible but not as useful
    # from .pgen2 import token  # and iterate token.__dict__.items() likewise
for name, val in python_symbols.__dict__.items():
if type(val) == int: _type_reprs[val] = name
return _type_reprs.setdefault(type_num, type_num)
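# Illustrative usage sketch (assumptions: `fixer` is a lib2to3 fixer instance
# exposing a `pattern_tree`, and `tree` is a parsed pytree AST):
def _example_match(fixer, tree):
    matcher = BottomMatcher()
    matcher.add_fixer(fixer)            # reduce the pattern and extend the automaton
    return matcher.run(tree.leaves())   # {fixer: [candidate nodes], ...}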
| apache-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/test/test_tcl.py | 11 | 29639 | import unittest
import re
import sys
import os
from test import test_support
from subprocess import Popen, PIPE
# Skip this test if the _tkinter module wasn't built.
_tkinter = test_support.import_module('_tkinter')
# Make sure tkinter._fix runs to set up the environment
tkinter = test_support.import_fresh_module('Tkinter')
from Tkinter import Tcl
from _tkinter import TclError
try:
from _testcapi import INT_MAX, PY_SSIZE_T_MAX
except ImportError:
INT_MAX = PY_SSIZE_T_MAX = sys.maxsize
tcl_version = tuple(map(int, _tkinter.TCL_VERSION.split('.')))
_tk_patchlevel = None
def get_tk_patchlevel():
global _tk_patchlevel
if _tk_patchlevel is None:
tcl = Tcl()
patchlevel = tcl.call('info', 'patchlevel')
m = re.match(r'(\d+)\.(\d+)([ab.])(\d+)$', patchlevel)
major, minor, releaselevel, serial = m.groups()
major, minor, serial = int(major), int(minor), int(serial)
releaselevel = {'a': 'alpha', 'b': 'beta', '.': 'final'}[releaselevel]
if releaselevel == 'final':
_tk_patchlevel = major, minor, serial, releaselevel, 0
else:
_tk_patchlevel = major, minor, 0, releaselevel, serial
return _tk_patchlevel
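# Examples: an 'info patchlevel' of '8.5.19' yields (8, 5, 19, 'final', 0),
# while '8.6b1' yields (8, 6, 0, 'beta', 1).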
class TkinterTest(unittest.TestCase):
def testFlattenLen(self):
# flatten(<object with no length>)
self.assertRaises(TypeError, _tkinter._flatten, True)
class TclTest(unittest.TestCase):
def setUp(self):
self.interp = Tcl()
self.wantobjects = self.interp.tk.wantobjects()
def testEval(self):
tcl = self.interp
tcl.eval('set a 1')
self.assertEqual(tcl.eval('set a'),'1')
def testEvalException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'set a')
def testEvalException2(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'this is wrong')
def testCall(self):
tcl = self.interp
tcl.call('set','a','1')
self.assertEqual(tcl.call('set','a'),'1')
def testCallException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.call,'set','a')
def testCallException2(self):
tcl = self.interp
self.assertRaises(TclError,tcl.call,'this','is','wrong')
def testSetVar(self):
tcl = self.interp
tcl.setvar('a','1')
self.assertEqual(tcl.eval('set a'),'1')
def testSetVarArray(self):
tcl = self.interp
tcl.setvar('a(1)','1')
self.assertEqual(tcl.eval('set a(1)'),'1')
def testGetVar(self):
tcl = self.interp
tcl.eval('set a 1')
self.assertEqual(tcl.getvar('a'),'1')
def testGetVarArray(self):
tcl = self.interp
tcl.eval('set a(1) 1')
self.assertEqual(tcl.getvar('a(1)'),'1')
def testGetVarException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.getvar,'a')
def testGetVarArrayException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.getvar,'a(1)')
def testUnsetVar(self):
tcl = self.interp
tcl.setvar('a',1)
self.assertEqual(tcl.eval('info exists a'),'1')
tcl.unsetvar('a')
self.assertEqual(tcl.eval('info exists a'),'0')
def testUnsetVarArray(self):
tcl = self.interp
tcl.setvar('a(1)',1)
tcl.setvar('a(2)',2)
self.assertEqual(tcl.eval('info exists a(1)'),'1')
self.assertEqual(tcl.eval('info exists a(2)'),'1')
tcl.unsetvar('a(1)')
self.assertEqual(tcl.eval('info exists a(1)'),'0')
self.assertEqual(tcl.eval('info exists a(2)'),'1')
def testUnsetVarException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.unsetvar,'a')
def get_integers(self):
integers = (0, 1, -1, 2**31-1, -2**31)
if tcl_version >= (8, 4): # wideInt was added in Tcl 8.4
integers += (2**31, -2**31-1, 2**63-1, -2**63)
# bignum was added in Tcl 8.5, but its support is able only since 8.5.8
if (get_tk_patchlevel() >= (8, 6, 0, 'final') or
(8, 5, 8) <= get_tk_patchlevel() < (8, 6)):
integers += (2**63, -2**63-1, 2**1000, -2**1000)
return integers
def test_getint(self):
tcl = self.interp.tk
for i in self.get_integers():
result = tcl.getint(' %d ' % i)
self.assertEqual(result, i)
self.assertIsInstance(result, type(int(result)))
if tcl_version >= (8, 5):
self.assertEqual(tcl.getint(' {:#o} '.format(i)), i)
self.assertEqual(tcl.getint(' %#o ' % i), i)
self.assertEqual(tcl.getint(' %#x ' % i), i)
if tcl_version < (8, 5): # bignum was added in Tcl 8.5
self.assertRaises(TclError, tcl.getint, str(2**1000))
self.assertEqual(tcl.getint(42), 42)
self.assertRaises(TypeError, tcl.getint)
self.assertRaises(TypeError, tcl.getint, '42', '10')
self.assertRaises(TypeError, tcl.getint, 42.0)
self.assertRaises(TclError, tcl.getint, 'a')
self.assertRaises((TypeError, ValueError, TclError),
tcl.getint, '42\0')
if test_support.have_unicode:
self.assertEqual(tcl.getint(unicode('42')), 42)
self.assertRaises((UnicodeEncodeError, ValueError, TclError),
tcl.getint, '42' + unichr(0xd800))
def test_getdouble(self):
tcl = self.interp.tk
self.assertEqual(tcl.getdouble(' 42 '), 42.0)
self.assertEqual(tcl.getdouble(' 42.5 '), 42.5)
self.assertEqual(tcl.getdouble(42.5), 42.5)
self.assertRaises(TypeError, tcl.getdouble)
self.assertRaises(TypeError, tcl.getdouble, '42.5', '10')
self.assertRaises(TypeError, tcl.getdouble, 42)
self.assertRaises(TclError, tcl.getdouble, 'a')
self.assertRaises((TypeError, ValueError, TclError),
tcl.getdouble, '42.5\0')
if test_support.have_unicode:
self.assertEqual(tcl.getdouble(unicode('42.5')), 42.5)
self.assertRaises((UnicodeEncodeError, ValueError, TclError),
tcl.getdouble, '42.5' + unichr(0xd800))
def test_getboolean(self):
tcl = self.interp.tk
self.assertIs(tcl.getboolean('on'), True)
self.assertIs(tcl.getboolean('1'), True)
self.assertIs(tcl.getboolean(u'on'), True)
self.assertIs(tcl.getboolean(u'1'), True)
self.assertIs(tcl.getboolean(42), True)
self.assertIs(tcl.getboolean(0), False)
self.assertIs(tcl.getboolean(42L), True)
self.assertIs(tcl.getboolean(0L), False)
self.assertRaises(TypeError, tcl.getboolean)
self.assertRaises(TypeError, tcl.getboolean, 'on', '1')
self.assertRaises(TypeError, tcl.getboolean, 1.0)
self.assertRaises(TclError, tcl.getboolean, 'a')
self.assertRaises((TypeError, ValueError, TclError),
tcl.getboolean, 'on\0')
if test_support.have_unicode:
self.assertIs(tcl.getboolean(unicode('on')), True)
self.assertRaises((UnicodeEncodeError, ValueError, TclError),
tcl.getboolean, 'on' + unichr(0xd800))
def testEvalFile(self):
tcl = self.interp
filename = "testEvalFile.tcl"
fd = open(filename,'w')
script = """set a 1
set b 2
set c [ expr $a + $b ]
"""
fd.write(script)
fd.close()
tcl.evalfile(filename)
os.remove(filename)
self.assertEqual(tcl.eval('set a'),'1')
self.assertEqual(tcl.eval('set b'),'2')
self.assertEqual(tcl.eval('set c'),'3')
def test_evalfile_null_in_result(self):
tcl = self.interp
with open(test_support.TESTFN, 'wb') as f:
self.addCleanup(test_support.unlink, test_support.TESTFN)
f.write("""
set a "a\0b"
set b "a\\0b"
""")
tcl.evalfile(test_support.TESTFN)
self.assertEqual(tcl.eval('set a'), 'a\xc0\x80b')
self.assertEqual(tcl.eval('set b'), 'a\xc0\x80b')
def testEvalFileException(self):
tcl = self.interp
filename = "doesnotexists"
try:
os.remove(filename)
        except Exception:
pass
self.assertRaises(TclError,tcl.evalfile,filename)
def testPackageRequireException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'package require DNE')
@unittest.skipUnless(sys.platform == 'win32', "only applies to Windows")
def testLoadWithUNC(self):
# Build a UNC path from the regular path.
# Something like
# \\%COMPUTERNAME%\c$\python27\python.exe
fullname = os.path.abspath(sys.executable)
if fullname[1] != ':':
self.skipTest('unusable path: %r' % fullname)
unc_name = r'\\%s\%s$\%s' % (os.environ['COMPUTERNAME'],
fullname[0],
fullname[3:])
with test_support.EnvironmentVarGuard() as env:
env.unset("TCL_LIBRARY")
cmd = '%s -c "import Tkinter; print Tkinter"' % (unc_name,)
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
except WindowsError as e:
if e.winerror == 5:
self.skipTest('Not permitted to start the child process')
else:
raise
out_data, err_data = p.communicate()
msg = '\n\n'.join(['"Tkinter.py" not in output',
'Command:', cmd,
'stdout:', out_data,
'stderr:', err_data])
self.assertIn('Tkinter.py', out_data, msg)
self.assertEqual(p.wait(), 0, 'Non-zero exit code')
def test_exprstring(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprstring(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, str)
self.assertRaises(TypeError, tcl.exprstring)
self.assertRaises(TypeError, tcl.exprstring, '8.2', '+6')
self.assertRaises(TclError, tcl.exprstring, 'spam')
check('', '0')
check('8.2 + 6', '14.2')
check('3.1 + $a', '6.1')
check('2 + "$a.$b"', '5.6')
check('4*[llength "6 2"]', '8')
check('{word one} < "word $a"', '0')
check('4*2 < 7', '0')
check('hypot($a, 4)', '5.0')
check('5 / 4', '1')
check('5 / 4.0', '1.25')
check('5 / ( [string length "abcd"] + 0.0 )', '1.25')
check('20.0/5.0', '4.0')
check('"0x03" > "2"', '1')
check('[string length "a\xc2\xbd\xe2\x82\xac"]', '3')
check(r'[string length "a\xbd\u20ac"]', '3')
check('"abc"', 'abc')
check('"a\xc2\xbd\xe2\x82\xac"', 'a\xc2\xbd\xe2\x82\xac')
check(r'"a\xbd\u20ac"', 'a\xc2\xbd\xe2\x82\xac')
check(r'"a\0b"', 'a\xc0\x80b')
if tcl_version >= (8, 5): # bignum was added in Tcl 8.5
check('2**64', str(2**64))
def test_exprdouble(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprdouble(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, float)
self.assertRaises(TypeError, tcl.exprdouble)
self.assertRaises(TypeError, tcl.exprdouble, '8.2', '+6')
self.assertRaises(TclError, tcl.exprdouble, 'spam')
check('', 0.0)
check('8.2 + 6', 14.2)
check('3.1 + $a', 6.1)
check('2 + "$a.$b"', 5.6)
check('4*[llength "6 2"]', 8.0)
check('{word one} < "word $a"', 0.0)
check('4*2 < 7', 0.0)
check('hypot($a, 4)', 5.0)
check('5 / 4', 1.0)
check('5 / 4.0', 1.25)
check('5 / ( [string length "abcd"] + 0.0 )', 1.25)
check('20.0/5.0', 4.0)
check('"0x03" > "2"', 1.0)
check('[string length "a\xc2\xbd\xe2\x82\xac"]', 3.0)
check(r'[string length "a\xbd\u20ac"]', 3.0)
self.assertRaises(TclError, tcl.exprdouble, '"abc"')
if tcl_version >= (8, 5): # bignum was added in Tcl 8.5
check('2**64', float(2**64))
def test_exprlong(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprlong(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, int)
self.assertRaises(TypeError, tcl.exprlong)
self.assertRaises(TypeError, tcl.exprlong, '8.2', '+6')
self.assertRaises(TclError, tcl.exprlong, 'spam')
check('', 0)
check('8.2 + 6', 14)
check('3.1 + $a', 6)
check('2 + "$a.$b"', 5)
check('4*[llength "6 2"]', 8)
check('{word one} < "word $a"', 0)
check('4*2 < 7', 0)
check('hypot($a, 4)', 5)
check('5 / 4', 1)
check('5 / 4.0', 1)
check('5 / ( [string length "abcd"] + 0.0 )', 1)
check('20.0/5.0', 4)
check('"0x03" > "2"', 1)
check('[string length "a\xc2\xbd\xe2\x82\xac"]', 3)
check(r'[string length "a\xbd\u20ac"]', 3)
self.assertRaises(TclError, tcl.exprlong, '"abc"')
if tcl_version >= (8, 5): # bignum was added in Tcl 8.5
self.assertRaises(TclError, tcl.exprlong, '2**64')
def test_exprboolean(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprboolean(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, int)
self.assertNotIsInstance(result, bool)
self.assertRaises(TypeError, tcl.exprboolean)
self.assertRaises(TypeError, tcl.exprboolean, '8.2', '+6')
self.assertRaises(TclError, tcl.exprboolean, 'spam')
check('', False)
for value in ('0', 'false', 'no', 'off'):
check(value, False)
check('"%s"' % value, False)
check('{%s}' % value, False)
for value in ('1', 'true', 'yes', 'on'):
check(value, True)
check('"%s"' % value, True)
check('{%s}' % value, True)
check('8.2 + 6', True)
check('3.1 + $a', True)
check('2 + "$a.$b"', True)
check('4*[llength "6 2"]', True)
check('{word one} < "word $a"', False)
check('4*2 < 7', False)
check('hypot($a, 4)', True)
check('5 / 4', True)
check('5 / 4.0', True)
check('5 / ( [string length "abcd"] + 0.0 )', True)
check('20.0/5.0', True)
check('"0x03" > "2"', True)
check('[string length "a\xc2\xbd\xe2\x82\xac"]', True)
check(r'[string length "a\xbd\u20ac"]', True)
self.assertRaises(TclError, tcl.exprboolean, '"abc"')
if tcl_version >= (8, 5): # bignum was added in Tcl 8.5
check('2**64', True)
@unittest.skipUnless(tcl_version >= (8, 5), 'requires Tcl version >= 8.5')
def test_booleans(self):
tcl = self.interp
def check(expr, expected):
result = tcl.call('expr', expr)
if tcl.wantobjects():
self.assertEqual(result, expected)
self.assertIsInstance(result, int)
else:
self.assertIn(result, (expr, str(int(expected))))
self.assertIsInstance(result, str)
check('true', True)
check('yes', True)
check('on', True)
check('false', False)
check('no', False)
check('off', False)
check('1 < 2', True)
check('1 > 2', False)
def test_expr_bignum(self):
tcl = self.interp
for i in self.get_integers():
result = tcl.call('expr', str(i))
if self.wantobjects:
self.assertEqual(result, i)
self.assertIsInstance(result, (int, long))
if abs(result) < 2**31:
self.assertIsInstance(result, int)
else:
self.assertEqual(result, str(i))
self.assertIsInstance(result, str)
if tcl_version < (8, 5): # bignum was added in Tcl 8.5
self.assertRaises(TclError, tcl.call, 'expr', str(2**1000))
def test_passing_values(self):
def passValue(value):
return self.interp.call('set', '_', value)
self.assertEqual(passValue(True), True if self.wantobjects else '1')
self.assertEqual(passValue(False), False if self.wantobjects else '0')
self.assertEqual(passValue('string'), 'string')
self.assertEqual(passValue('string\xbd'), 'string\xbd')
self.assertEqual(passValue('string\xe2\x82\xac'), u'string\u20ac')
self.assertEqual(passValue(u'string'), u'string')
self.assertEqual(passValue(u'string\xbd'), u'string\xbd')
self.assertEqual(passValue(u'string\u20ac'), u'string\u20ac')
self.assertEqual(passValue('str\x00ing'), 'str\x00ing')
self.assertEqual(passValue('str\xc0\x80ing'), 'str\x00ing')
self.assertEqual(passValue(u'str\x00ing'), u'str\x00ing')
self.assertEqual(passValue(u'str\x00ing\xbd'), u'str\x00ing\xbd')
self.assertEqual(passValue(u'str\x00ing\u20ac'), u'str\x00ing\u20ac')
for i in self.get_integers():
self.assertEqual(passValue(i), i if self.wantobjects else str(i))
if tcl_version < (8, 5): # bignum was added in Tcl 8.5
self.assertEqual(passValue(2**1000), str(2**1000))
for f in (0.0, 1.0, -1.0, 1//3, 1/3.0,
sys.float_info.min, sys.float_info.max,
-sys.float_info.min, -sys.float_info.max):
if self.wantobjects:
self.assertEqual(passValue(f), f)
else:
self.assertEqual(float(passValue(f)), f)
if self.wantobjects:
f = passValue(float('nan'))
self.assertNotEqual(f, f)
self.assertEqual(passValue(float('inf')), float('inf'))
self.assertEqual(passValue(-float('inf')), -float('inf'))
else:
self.assertEqual(float(passValue(float('inf'))), float('inf'))
self.assertEqual(float(passValue(-float('inf'))), -float('inf'))
# XXX NaN representation can be not parsable by float()
self.assertEqual(passValue((1, '2', (3.4,))),
(1, '2', (3.4,)) if self.wantobjects else '1 2 3.4')
def test_user_command(self):
result = []
def testfunc(arg):
result.append(arg)
return arg
self.interp.createcommand('testfunc', testfunc)
self.addCleanup(self.interp.tk.deletecommand, 'testfunc')
def check(value, expected=None, eq=self.assertEqual):
if expected is None:
expected = value
del result[:]
r = self.interp.call('testfunc', value)
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], (str, unicode))
eq(result[0], expected)
self.assertIsInstance(r, (str, unicode))
eq(r, expected)
def float_eq(actual, expected):
self.assertAlmostEqual(float(actual), expected,
delta=abs(expected) * 1e-10)
check(True, '1')
check(False, '0')
check('string')
check('string\xbd')
check('string\xe2\x82\xac', u'string\u20ac')
check('')
check(u'string')
check(u'string\xbd')
check(u'string\u20ac')
check(u'')
check('str\xc0\x80ing', u'str\x00ing')
check('str\xc0\x80ing\xe2\x82\xac', u'str\x00ing\u20ac')
check(u'str\x00ing')
check(u'str\x00ing\xbd')
check(u'str\x00ing\u20ac')
for i in self.get_integers():
check(i, str(i))
if tcl_version < (8, 5): # bignum was added in Tcl 8.5
check(2**1000, str(2**1000))
for f in (0.0, 1.0, -1.0):
check(f, repr(f))
for f in (1/3.0, sys.float_info.min, sys.float_info.max,
-sys.float_info.min, -sys.float_info.max):
check(f, eq=float_eq)
check(float('inf'), eq=float_eq)
check(-float('inf'), eq=float_eq)
# XXX NaN representation can be not parsable by float()
check((), '')
check((1, (2,), (3, 4), '5 6', ()), '1 2 {3 4} {5 6} {}')
def test_splitlist(self):
splitlist = self.interp.tk.splitlist
call = self.interp.tk.call
self.assertRaises(TypeError, splitlist)
self.assertRaises(TypeError, splitlist, 'a', 'b')
self.assertRaises(TypeError, splitlist, 2)
testcases = [
('2', ('2',)),
('', ()),
('{}', ('',)),
('""', ('',)),
('a\n b\t\r c\n ', ('a', 'b', 'c')),
(u'a\n b\t\r c\n ', ('a', 'b', 'c')),
('a \xe2\x82\xac', ('a', '\xe2\x82\xac')),
(u'a \u20ac', ('a', '\xe2\x82\xac')),
('a\xc0\x80b c\xc0\x80d', ('a\xc0\x80b', 'c\xc0\x80d')),
('a {b c}', ('a', 'b c')),
(r'a b\ c', ('a', 'b c')),
(('a', 'b c'), ('a', 'b c')),
('a 2', ('a', '2')),
(('a', 2), ('a', 2)),
('a 3.4', ('a', '3.4')),
(('a', 3.4), ('a', 3.4)),
((), ()),
(call('list', 1, '2', (3.4,)),
(1, '2', (3.4,)) if self.wantobjects else
('1', '2', '3.4')),
]
if tcl_version >= (8, 5):
if not self.wantobjects:
expected = ('12', '\xe2\x82\xac', '\xe2\x82\xac', '3.4')
elif get_tk_patchlevel() < (8, 5, 5):
# Before 8.5.5 dicts were converted to lists through string
expected = ('12', u'\u20ac', u'\u20ac', '3.4')
else:
expected = (12, u'\u20ac', u'\u20ac', (3.4,))
testcases += [
(call('dict', 'create', 12, u'\u20ac', '\xe2\x82\xac', (3.4,)),
expected),
]
for arg, res in testcases:
self.assertEqual(splitlist(arg), res)
self.assertRaises(TclError, splitlist, '{')
def test_split(self):
split = self.interp.tk.split
call = self.interp.tk.call
self.assertRaises(TypeError, split)
self.assertRaises(TypeError, split, 'a', 'b')
self.assertRaises(TypeError, split, 2)
testcases = [
('2', '2'),
('', ''),
('{}', ''),
('""', ''),
('{', '{'),
('a\n b\t\r c\n ', ('a', 'b', 'c')),
(u'a\n b\t\r c\n ', ('a', 'b', 'c')),
('a \xe2\x82\xac', ('a', '\xe2\x82\xac')),
(u'a \u20ac', ('a', '\xe2\x82\xac')),
('a\xc0\x80b', 'a\xc0\x80b'),
('a\xc0\x80b c\xc0\x80d', ('a\xc0\x80b', 'c\xc0\x80d')),
('a {b c}', ('a', ('b', 'c'))),
(r'a b\ c', ('a', ('b', 'c'))),
(('a', 'b c'), ('a', ('b', 'c'))),
(('a', u'b c'), ('a', ('b', 'c'))),
('a 2', ('a', '2')),
(('a', 2), ('a', 2)),
('a 3.4', ('a', '3.4')),
(('a', 3.4), ('a', 3.4)),
(('a', (2, 3.4)), ('a', (2, 3.4))),
((), ()),
(call('list', 1, '2', (3.4,)),
(1, '2', (3.4,)) if self.wantobjects else
('1', '2', '3.4')),
]
if tcl_version >= (8, 5):
if not self.wantobjects:
expected = ('12', '\xe2\x82\xac', '\xe2\x82\xac', '3.4')
elif get_tk_patchlevel() < (8, 5, 5):
# Before 8.5.5 dicts were converted to lists through string
expected = ('12', u'\u20ac', u'\u20ac', '3.4')
else:
expected = (12, u'\u20ac', u'\u20ac', (3.4,))
testcases += [
(call('dict', 'create', 12, u'\u20ac', '\xe2\x82\xac', (3.4,)),
expected),
]
for arg, res in testcases:
self.assertEqual(split(arg), res)
def test_splitdict(self):
splitdict = tkinter._splitdict
tcl = self.interp.tk
arg = '-a {1 2 3} -something foo status {}'
self.assertEqual(splitdict(tcl, arg, False),
{'-a': '1 2 3', '-something': 'foo', 'status': ''})
self.assertEqual(splitdict(tcl, arg),
{'a': '1 2 3', 'something': 'foo', 'status': ''})
arg = ('-a', (1, 2, 3), '-something', 'foo', 'status', '{}')
self.assertEqual(splitdict(tcl, arg, False),
{'-a': (1, 2, 3), '-something': 'foo', 'status': '{}'})
self.assertEqual(splitdict(tcl, arg),
{'a': (1, 2, 3), 'something': 'foo', 'status': '{}'})
self.assertRaises(RuntimeError, splitdict, tcl, '-a b -c ')
self.assertRaises(RuntimeError, splitdict, tcl, ('-a', 'b', '-c'))
arg = tcl.call('list',
'-a', (1, 2, 3), '-something', 'foo', 'status', ())
self.assertEqual(splitdict(tcl, arg),
{'a': (1, 2, 3) if self.wantobjects else '1 2 3',
'something': 'foo', 'status': ''})
if tcl_version >= (8, 5):
arg = tcl.call('dict', 'create',
'-a', (1, 2, 3), '-something', 'foo', 'status', ())
if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5):
# Before 8.5.5 dicts were converted to lists through string
expected = {'a': '1 2 3', 'something': 'foo', 'status': ''}
else:
expected = {'a': (1, 2, 3), 'something': 'foo', 'status': ''}
self.assertEqual(splitdict(tcl, arg), expected)
character_size = 4 if sys.maxunicode > 0xFFFF else 2
class BigmemTclTest(unittest.TestCase):
def setUp(self):
self.interp = Tcl()
@test_support.cpython_only
@unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
@test_support.precisionbigmemtest(size=INT_MAX + 1, memuse=5, dry_run=False)
def test_huge_string_call(self, size):
value = ' ' * size
self.assertRaises(OverflowError, self.interp.call, 'set', '_', value)
@test_support.cpython_only
@unittest.skipUnless(test_support.have_unicode, 'requires unicode support')
@unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
@test_support.precisionbigmemtest(size=INT_MAX + 1,
memuse=2*character_size + 2,
dry_run=False)
def test_huge_unicode_call(self, size):
value = unicode(' ') * size
self.assertRaises(OverflowError, self.interp.call, 'set', '_', value)
@test_support.cpython_only
@unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
@test_support.precisionbigmemtest(size=INT_MAX + 1, memuse=9, dry_run=False)
def test_huge_string_builtins(self, size):
value = '1' + ' ' * size
self.check_huge_string_builtins(value)
@test_support.cpython_only
@unittest.skipUnless(test_support.have_unicode, 'requires unicode support')
@unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
@test_support.precisionbigmemtest(size=INT_MAX + 1,
memuse=2*character_size + 7,
dry_run=False)
def test_huge_unicode_builtins(self, size):
value = unicode('1' + ' ' * size)
self.check_huge_string_builtins(value)
def check_huge_string_builtins(self, value):
self.assertRaises(OverflowError, self.interp.tk.getint, value)
self.assertRaises(OverflowError, self.interp.tk.getdouble, value)
self.assertRaises(OverflowError, self.interp.tk.getboolean, value)
self.assertRaises(OverflowError, self.interp.eval, value)
self.assertRaises(OverflowError, self.interp.evalfile, value)
self.assertRaises(OverflowError, self.interp.record, value)
self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
self.assertRaises(OverflowError, self.interp.setvar, value, 'x', 'a')
self.assertRaises(OverflowError, self.interp.setvar, 'x', value, 'a')
self.assertRaises(OverflowError, self.interp.unsetvar, value)
self.assertRaises(OverflowError, self.interp.unsetvar, 'x', value)
self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
self.assertRaises(OverflowError, self.interp.exprstring, value)
self.assertRaises(OverflowError, self.interp.exprlong, value)
self.assertRaises(OverflowError, self.interp.exprboolean, value)
self.assertRaises(OverflowError, self.interp.splitlist, value)
self.assertRaises(OverflowError, self.interp.split, value)
self.assertRaises(OverflowError, self.interp.createcommand, value, max)
self.assertRaises(OverflowError, self.interp.deletecommand, value)
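# The assertions above all exercise the same pattern: build a value longer
# than INT_MAX and check that the C bridge raises OverflowError instead of
# silently truncating the length.  A minimal standalone sketch (assuming a
# 64-bit build with enough memory to construct such a string):
#
#   value = ' ' * (INT_MAX + 1)
#   try:
#       Tcl().call('set', '_', value)
#   except OverflowError:
#       pass  # expected: Tcl's C API takes int-sized lengths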
def setUpModule():
if test_support.verbose:
tcl = Tcl()
print 'patchlevel =', tcl.call('info', 'patchlevel')
def test_main():
test_support.run_unittest(TclTest, TkinterTest, BigmemTclTest)
if __name__ == "__main__":
test_main()
| mit |
lstephen/construi | construi/errors.py | 1 | 1545 | import sys
import traceback
from typing import Any, Callable, Dict, NoReturn
import construi.console as console
from compose.errors import OperationFailedError
from compose.service import BuildError
from docker.errors import APIError
from .config import ConfigException, NoSuchTargetException
from .target import BuildFailedException
def show_error(fmt, arg=lambda e: "", show_traceback=False):
# type: (str, Callable[[Any], Any], bool) -> Callable[[Exception], None]
def f(e):
# type: (Exception) -> None
console.error(("\n" + fmt + "\n").format(arg(e)))
if show_traceback:
traceback.print_exc()
return f
def on_keyboard_interrupt(e):
# type: (KeyboardInterrupt) -> None
console.warn("\nBuild Interrupted.")
def on_unhandled_exception(e):
# type: (Exception) -> NoReturn
raise e
HANDLERS = {
KeyboardInterrupt: on_keyboard_interrupt,
APIError: show_error("Docker Error: {}", lambda e: e.explanation),
OperationFailedError: show_error(
"Unexpected Error: {}", lambda e: e.msg, show_traceback=True
),
BuildError: show_error("Error building docker image."),
NoSuchTargetException: show_error("No such target: {}", lambda e: e.target),
ConfigException: show_error("Configuration Error: {}", lambda e: e.msg),
BuildFailedException: show_error("Build Failed."),
} # type: Dict[Any, Callable[[Any], None]]
def on_exception(e):
# type: (Exception) -> NoReturn
HANDLERS.get(type(e), on_unhandled_exception)(e)
sys.exit(1)
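# A hedged usage sketch, not part of this module: a CLI entry point would
# typically funnel every failure through on_exception so each known error
# type gets its dedicated message while anything unexpected re-raises.
# run_build() below is a hypothetical entry point, not a construi API.
#
#   def main():
#       # type: () -> None
#       try:
#           run_build()       # hypothetical
#       except Exception as e:
#           on_exception(e)   # dispatches via HANDLERS, then sys.exit(1)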
| apache-2.0 |
garvitr/sympy | sympy/strategies/branch/tests/test_core.py | 58 | 2416 | from sympy.strategies.branch.core import (exhaust, debug, multiplex,
condition, notempty, chain, onaction, sfilter, yieldify, do_one,
identity)
from sympy.core.compatibility import get_function_name, range
def posdec(x):
if x > 0:
yield x-1
else:
yield x
def branch5(x):
if 0 < x < 5:
yield x-1
elif 5 < x < 10:
yield x+1
elif x == 5:
yield x+1
yield x-1
else:
yield x
even = lambda x: x%2 == 0
def inc(x):
yield x + 1
def one_to_n(n):
for i in range(n):
yield i
def test_exhaust():
brl = exhaust(branch5)
assert set(brl(3)) == set([0])
assert set(brl(7)) == set([10])
assert set(brl(5)) == set([0, 10])
def test_debug():
from sympy.core.compatibility import StringIO
file = StringIO()
rl = debug(posdec, file)
list(rl(5))
log = file.getvalue()
file.close()
assert get_function_name(posdec) in log
assert '5' in log
assert '4' in log
def test_multiplex():
brl = multiplex(posdec, branch5)
assert set(brl(3)) == set([2])
assert set(brl(7)) == set([6, 8])
assert set(brl(5)) == set([4, 6])
def test_condition():
brl = condition(even, branch5)
assert set(brl(4)) == set(branch5(4))
assert set(brl(5)) == set([])
def test_sfilter():
brl = sfilter(even, one_to_n)
assert set(brl(10)) == set([0, 2, 4, 6, 8])
def test_notempty():
def ident_if_even(x):
if even(x):
yield x
brl = notempty(ident_if_even)
assert set(brl(4)) == set([4])
assert set(brl(5)) == set([5])
def test_chain():
assert list(chain()(2)) == [2] # identity
assert list(chain(inc, inc)(2)) == [4]
assert list(chain(branch5, inc)(4)) == [4]
assert set(chain(branch5, inc)(5)) == set([5, 7])
assert list(chain(inc, branch5)(5)) == [7]
def test_onaction():
L = []
def record(fn, input, output):
L.append((input, output))
list(onaction(inc, record)(2))
assert L == [(2, 3)]
list(onaction(identity, record)(2))
assert L == [(2, 3)]
def test_yieldify():
inc = lambda x: x + 1
yinc = yieldify(inc)
assert list(yinc(3)) == [4]
def test_do_one():
def bad(expr):
raise ValueError()
yield False
assert list(do_one(inc)(3)) == [4]
assert list(do_one(inc, bad)(3)) == [4]
assert list(do_one(inc, posdec)(3)) == [4]
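# A hedged sketch of how these combinators compose, mirroring the assertions
# above.  exhaust() keeps applying a branching rule until no application
# changes the value, exploring every branch, so from 5 it reaches both fixed
# points of branch5:
#
#   saturate = exhaust(branch5)
#   assert set(saturate(5)) == set([0, 10])
#
# chain() runs strategies in sequence over every yielded possibility:
#
#   assert set(chain(branch5, inc)(5)) == set([5, 7])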
| bsd-3-clause |
tedi3231/openerp | build/lib/openerp/addons/project_mrp/__init__.py | 68 | 1092 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_procurement
import project_mrp
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Jitsusama/lets-do-dns | tests/unit/acme_dns_auth/authenticate/test_cleanup_stage.py | 1 | 2731 | """Tests the lets_do_dns.acme_dns_auth.authenticate.py module."""
from mock import call, ANY
import pytest
from lets_do_dns.environment import Environment
from lets_do_dns.acme_dns_auth.authenticate import Authenticate
def test_properly_initializes_resource(mocker):
stub_environment = mocker.MagicMock(
spec=Environment,
api_key='stub-api-key', domain='stub-domain', validation_key=None,
fqdn='stub-host.stub-domain', record_id=984567, post_cmd=None)
mock_resource = mocker.patch(
'lets_do_dns.acme_dns_auth.authenticate.Resource')
authentication = Authenticate(environment=stub_environment)
authentication.perform()
mock_resource.assert_called_once_with(
'stub-api-key', '_acme-challenge.stub-host',
'stub-domain', None, 984567)
def test_triggers_resource_delete_after_resource_init(mocker):
stub_environment = mocker.MagicMock(
spec=Environment,
api_key=None, domain='stub-domain', validation_key=None,
fqdn='stub-host.stub-domain', record_id=0, post_cmd=None)
mock_resource = mocker.patch(
'lets_do_dns.acme_dns_auth.authenticate.Resource')
authentication = Authenticate(environment=stub_environment)
authentication.perform()
initialize_then_delete = [
call(ANY, ANY, ANY, ANY, ANY),
call().delete()]
mock_resource.assert_has_calls(initialize_then_delete)
def test_does_not_call_sleep(mocker):
stub_environment = mocker.MagicMock(
spec=Environment,
api_key=None, domain='stub-domain', validation_key=None,
fqdn='stub-host.stub-domain', record_id=1, post_cmd=None)
mocker.patch('lets_do_dns.acme_dns_auth.authenticate.Resource')
mock_sleep = mocker.patch(
'lets_do_dns.acme_dns_auth.authenticate.sleep')
authentication = Authenticate(environment=stub_environment)
authentication.perform()
mock_sleep.assert_not_called()
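# The tests in this module pin down the cleanup flow of perform(): build a
# Resource for the '_acme-challenge' record, delete the record named by
# record_id, and, when post_cmd is set, run it with CERTBOT_HOSTNAME and a
# fixed PATH in its environment.  A hedged sketch of the equivalent caller
# code (Environment is presumably populated from certbot's hook variables):
#
#   env = Environment()
#   Authenticate(environment=env).perform()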
@pytest.mark.parametrize(
'fqdn', ['stub-host1.stub-domain', 'stub-host2.stub-domain'])
def test_passes_postcmd_to_run(mocker, fqdn):
stub_environment = mocker.MagicMock(
spec=Environment,
api_key=None, domain='stub-domain', validation_key=None,
fqdn=fqdn, record_id=3, post_cmd='test-program --help')
mocker.patch('lets_do_dns.acme_dns_auth.authenticate.Resource')
mock_run = mocker.patch('lets_do_dns.acme_dns_auth.authenticate.run')
authentication = Authenticate(environment=stub_environment)
authentication.perform()
mock_run.assert_called_once_with(
'test-program --help',
env={'CERTBOT_HOSTNAME': fqdn,
'PATH': ("/bin:/sbin:/usr/bin:/usr/sbin:"
"/usr/local/bin:/usr/local/sbin")})
| apache-2.0 |
amanand/vmx-docker-lwaftr | jetapp/src/conf/protos/openconfig_service_pb2.py | 1 | 59081 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: openconfig_service.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='openconfig_service.proto',
package='openconfig',
syntax='proto3',
serialized_pb=_b('\n\x18openconfig_service.proto\x12\nopenconfig\"-\n\x17GetDataEncodingsRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\"\xb9\x01\n\x18GetDataEncodingsResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x39\n\x08\x65ncoding\x18\x02 \x03(\x0e\x32\'.openconfig.OpenConfigDataEncodingTypes\x12=\n\rresponse_code\x18\x03 \x01(\x0e\x32&.openconfig.OpenConfigRpcResponseTypes\x12\x0f\n\x07message\x18\x04 \x01(\t\"g\n\x16SetDataEncodingRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x39\n\x08\x65ncoding\x18\x02 \x01(\x0e\x32\'.openconfig.OpenConfigDataEncodingTypes\"}\n\x17SetDataEncodingResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12=\n\rresponse_code\x18\x02 \x01(\x0e\x32&.openconfig.OpenConfigRpcResponseTypes\x12\x0f\n\x07message\x18\x03 \x01(\t\"&\n\x10GetModelsRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\"9\n\x05Model\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\"\x99\x01\n\x11GetModelsResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12 \n\x05model\x18\x02 \x03(\x0b\x32\x11.openconfig.Model\x12=\n\rresponse_code\x18\x03 \x01(\x0e\x32&.openconfig.OpenConfigRpcResponseTypes\x12\x0f\n\x07message\x18\x04 \x01(\t\"d\n\x0eGetRequestList\x12\x14\n\x0coperation_id\x18\x01 \x01(\t\x12.\n\toperation\x18\x02 \x01(\x0e\x32\x1b.openconfig.GetDataCommands\x12\x0c\n\x04path\x18\x03 \x01(\t\"\x8c\x01\n\nGetRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x39\n\x08\x65ncoding\x18\x02 \x01(\x0e\x32\'.openconfig.OpenConfigDataEncodingTypes\x12/\n\x0bget_request\x18\x03 \x03(\x0b\x32\x1a.openconfig.GetRequestList\"\xed\x01\n\x0bGetResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x36\n\x08response\x18\x02 \x03(\x0b\x32$.openconfig.GetResponse.ResponseList\x1a\x91\x01\n\x0cResponseList\x12\x14\n\x0coperation_id\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12=\n\rresponse_code\x18\x04 \x01(\x0e\x32&.openconfig.OpenConfigRpcResponseTypes\x12\x0f\n\x07message\x18\x05 \x01(\t\"\xb2\x02\n\nSetRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x13\n\x0btransaction\x18\x02 \x01(\x08\x12\x39\n\x08\x65ncoding\x18\x03 \x01(\x0e\x32\'.openconfig.OpenConfigDataEncodingTypes\x12\x44\n\x10\x63onfig_operation\x18\x04 \x03(\x0b\x32*.openconfig.SetRequest.ConfigOperationList\x1az\n\x13\x43onfigOperationList\x12\x14\n\x0coperation_id\x18\x01 \x01(\t\x12\x30\n\toperation\x18\x02 \x01(\x0e\x32\x1d.openconfig.SetConfigCommands\x12\x0c\n\x04path\x18\x03 \x01(\t\x12\r\n\x05value\x18\x04 \x01(\t\"\xcf\x01\n\x0bSetResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x36\n\x08response\x18\x02 \x03(\x0b\x32$.openconfig.SetResponse.ResponseList\x1at\n\x0cResponseList\x12\x14\n\x0coperation_id\x18\x01 \x01(\t\x12=\n\rresponse_code\x18\x02 \x01(\x0e\x32&.openconfig.OpenConfigRpcResponseTypes\x12\x0f\n\x07message\x18\x03 '
'\x01(\t*B\n\x1bOpenConfigDataEncodingTypes\x12\x10\n\x0c\x45NCODING_XML\x10\x00\x12\x11\n\rENCODING_JSON\x10\x01*M\n\x0fGetDataCommands\x12\x0b\n\x07GET_ALL\x10\x00\x12\x0e\n\nGET_CONFIG\x10\x01\x12\x0f\n\x0bGET_OPSTATE\x10\x02\x12\x0c\n\x08GET_OPER\x10\x03*M\n\x11SetConfigCommands\x12\x11\n\rUPDATE_CONFIG\x10\x00\x12\x12\n\x0eREPLACE_CONFIG\x10\x01\x12\x11\n\rDELETE_CONFIG\x10\x02*\xc1\x01\n\x1aOpenConfigRpcResponseTypes\x12\x06\n\x02OK\x10\x00\x12\x07\n\x03NOK\x10\x01\x12\x14\n\x10UNSUPPORTED_PATH\x10\x02\x12\x10\n\x0cINVALID_PATH\x10\x03\x12\x19\n\x15INVALID_CONFIGURATION\x10\x04\x12\x18\n\x14UNSUPPORTED_INTERVAL\x10\x05\x12\x1b\n\x17INVALID_SUBSCRIPTION_ID\x10\x06\x12\x18\n\x14UNSUPPORTED_ENCODING\x10\x07\x32\x91\x03\n\x10OpenconfigRpcApi\x12_\n\x10GetDataEncodings\x12#.openconfig.GetDataEncodingsRequest\x1a$.openconfig.GetDataEncodingsResponse\"\x00\x12\\\n\x0fSetDataEncoding\x12\".openconfig.SetDataEncodingRequest\x1a#.openconfig.SetDataEncodingResponse\"\x00\x12J\n\tGetModels\x12\x1c.openconfig.GetModelsRequest\x1a\x1d.openconfig.GetModelsResponse\"\x00\x12\x38\n\x03Get\x12\x16.openconfig.GetRequest\x1a\x17.openconfig.GetResponse\"\x00\x12\x38\n\x03Set\x12\x16.openconfig.SetRequest\x1a\x17.openconfig.SetResponse\"\x00\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_OPENCONFIGDATAENCODINGTYPES = _descriptor.EnumDescriptor(
name='OpenConfigDataEncodingTypes',
full_name='openconfig.OpenConfigDataEncodingTypes',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ENCODING_XML', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ENCODING_JSON', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1766,
serialized_end=1832,
)
_sym_db.RegisterEnumDescriptor(_OPENCONFIGDATAENCODINGTYPES)
OpenConfigDataEncodingTypes = enum_type_wrapper.EnumTypeWrapper(
_OPENCONFIGDATAENCODINGTYPES)
_GETDATACOMMANDS = _descriptor.EnumDescriptor(
name='GetDataCommands',
full_name='openconfig.GetDataCommands',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='GET_ALL', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GET_CONFIG', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GET_OPSTATE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GET_OPER', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1834,
serialized_end=1911,
)
_sym_db.RegisterEnumDescriptor(_GETDATACOMMANDS)
GetDataCommands = enum_type_wrapper.EnumTypeWrapper(_GETDATACOMMANDS)
_SETCONFIGCOMMANDS = _descriptor.EnumDescriptor(
name='SetConfigCommands',
full_name='openconfig.SetConfigCommands',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UPDATE_CONFIG', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REPLACE_CONFIG', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DELETE_CONFIG', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1913,
serialized_end=1990,
)
_sym_db.RegisterEnumDescriptor(_SETCONFIGCOMMANDS)
SetConfigCommands = enum_type_wrapper.EnumTypeWrapper(_SETCONFIGCOMMANDS)
_OPENCONFIGRPCRESPONSETYPES = _descriptor.EnumDescriptor(
name='OpenConfigRpcResponseTypes',
full_name='openconfig.OpenConfigRpcResponseTypes',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOK', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNSUPPORTED_PATH', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_PATH', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_CONFIGURATION', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNSUPPORTED_INTERVAL', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_SUBSCRIPTION_ID', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNSUPPORTED_ENCODING', index=7, number=7,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1993,
serialized_end=2186,
)
_sym_db.RegisterEnumDescriptor(_OPENCONFIGRPCRESPONSETYPES)
OpenConfigRpcResponseTypes = enum_type_wrapper.EnumTypeWrapper(
_OPENCONFIGRPCRESPONSETYPES)
ENCODING_XML = 0
ENCODING_JSON = 1
GET_ALL = 0
GET_CONFIG = 1
GET_OPSTATE = 2
GET_OPER = 3
UPDATE_CONFIG = 0
REPLACE_CONFIG = 1
DELETE_CONFIG = 2
OK = 0
NOK = 1
UNSUPPORTED_PATH = 2
INVALID_PATH = 3
INVALID_CONFIGURATION = 4
UNSUPPORTED_INTERVAL = 5
INVALID_SUBSCRIPTION_ID = 6
UNSUPPORTED_ENCODING = 7
_GETDATAENCODINGSREQUEST = _descriptor.Descriptor(
name='GetDataEncodingsRequest',
full_name='openconfig.GetDataEncodingsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetDataEncodingsRequest.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=40,
serialized_end=85,
)
_GETDATAENCODINGSRESPONSE = _descriptor.Descriptor(
name='GetDataEncodingsResponse',
full_name='openconfig.GetDataEncodingsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetDataEncodingsResponse.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encoding', full_name='openconfig.GetDataEncodingsResponse.encoding', index=1,
number=2, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_code', full_name='openconfig.GetDataEncodingsResponse.response_code', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='openconfig.GetDataEncodingsResponse.message', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=273,
)
_SETDATAENCODINGREQUEST = _descriptor.Descriptor(
name='SetDataEncodingRequest',
full_name='openconfig.SetDataEncodingRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.SetDataEncodingRequest.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encoding', full_name='openconfig.SetDataEncodingRequest.encoding', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=275,
serialized_end=378,
)
_SETDATAENCODINGRESPONSE = _descriptor.Descriptor(
name='SetDataEncodingResponse',
full_name='openconfig.SetDataEncodingResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.SetDataEncodingResponse.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_code', full_name='openconfig.SetDataEncodingResponse.response_code', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='openconfig.SetDataEncodingResponse.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=380,
serialized_end=505,
)
_GETMODELSREQUEST = _descriptor.Descriptor(
name='GetModelsRequest',
full_name='openconfig.GetModelsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetModelsRequest.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=507,
serialized_end=545,
)
_MODEL = _descriptor.Descriptor(
name='Model',
full_name='openconfig.Model',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='openconfig.Model.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='namespace', full_name='openconfig.Model.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='openconfig.Model.version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=604,
)
_GETMODELSRESPONSE = _descriptor.Descriptor(
name='GetModelsResponse',
full_name='openconfig.GetModelsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetModelsResponse.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model', full_name='openconfig.GetModelsResponse.model', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_code', full_name='openconfig.GetModelsResponse.response_code', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='openconfig.GetModelsResponse.message', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=607,
serialized_end=760,
)
_GETREQUESTLIST = _descriptor.Descriptor(
name='GetRequestList',
full_name='openconfig.GetRequestList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation_id', full_name='openconfig.GetRequestList.operation_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='operation', full_name='openconfig.GetRequestList.operation', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path', full_name='openconfig.GetRequestList.path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=762,
serialized_end=862,
)
_GETREQUEST = _descriptor.Descriptor(
name='GetRequest',
full_name='openconfig.GetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetRequest.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encoding', full_name='openconfig.GetRequest.encoding', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='get_request', full_name='openconfig.GetRequest.get_request', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=865,
serialized_end=1005,
)
_GETRESPONSE_RESPONSELIST = _descriptor.Descriptor(
name='ResponseList',
full_name='openconfig.GetResponse.ResponseList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation_id', full_name='openconfig.GetResponse.ResponseList.operation_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path', full_name='openconfig.GetResponse.ResponseList.path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='openconfig.GetResponse.ResponseList.value', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_code', full_name='openconfig.GetResponse.ResponseList.response_code', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='openconfig.GetResponse.ResponseList.message', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1100,
serialized_end=1245,
)
_GETRESPONSE = _descriptor.Descriptor(
name='GetResponse',
full_name='openconfig.GetResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetResponse.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response', full_name='openconfig.GetResponse.response', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_GETRESPONSE_RESPONSELIST, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1008,
serialized_end=1245,
)
_SETREQUEST_CONFIGOPERATIONLIST = _descriptor.Descriptor(
name='ConfigOperationList',
full_name='openconfig.SetRequest.ConfigOperationList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation_id', full_name='openconfig.SetRequest.ConfigOperationList.operation_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='operation', full_name='openconfig.SetRequest.ConfigOperationList.operation', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path', full_name='openconfig.SetRequest.ConfigOperationList.path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='openconfig.SetRequest.ConfigOperationList.value', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1432,
serialized_end=1554,
)
_SETREQUEST = _descriptor.Descriptor(
name='SetRequest',
full_name='openconfig.SetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.SetRequest.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='openconfig.SetRequest.transaction', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encoding', full_name='openconfig.SetRequest.encoding', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='config_operation', full_name='openconfig.SetRequest.config_operation', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SETREQUEST_CONFIGOPERATIONLIST, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1248,
serialized_end=1554,
)
_SETRESPONSE_RESPONSELIST = _descriptor.Descriptor(
name='ResponseList',
full_name='openconfig.SetResponse.ResponseList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation_id', full_name='openconfig.SetResponse.ResponseList.operation_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_code', full_name='openconfig.SetResponse.ResponseList.response_code', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='openconfig.SetResponse.ResponseList.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1648,
serialized_end=1764,
)
_SETRESPONSE = _descriptor.Descriptor(
name='SetResponse',
full_name='openconfig.SetResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.SetResponse.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response', full_name='openconfig.SetResponse.response', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SETRESPONSE_RESPONSELIST, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1557,
serialized_end=1764,
)
_GETDATAENCODINGSRESPONSE.fields_by_name[
'encoding'].enum_type = _OPENCONFIGDATAENCODINGTYPES
_GETDATAENCODINGSRESPONSE.fields_by_name[
'response_code'].enum_type = _OPENCONFIGRPCRESPONSETYPES
_SETDATAENCODINGREQUEST.fields_by_name[
'encoding'].enum_type = _OPENCONFIGDATAENCODINGTYPES
_SETDATAENCODINGRESPONSE.fields_by_name[
'response_code'].enum_type = _OPENCONFIGRPCRESPONSETYPES
_GETMODELSRESPONSE.fields_by_name['model'].message_type = _MODEL
_GETMODELSRESPONSE.fields_by_name[
'response_code'].enum_type = _OPENCONFIGRPCRESPONSETYPES
_GETREQUESTLIST.fields_by_name['operation'].enum_type = _GETDATACOMMANDS
_GETREQUEST.fields_by_name['encoding'].enum_type = _OPENCONFIGDATAENCODINGTYPES
_GETREQUEST.fields_by_name['get_request'].message_type = _GETREQUESTLIST
_GETRESPONSE_RESPONSELIST.fields_by_name[
'response_code'].enum_type = _OPENCONFIGRPCRESPONSETYPES
_GETRESPONSE_RESPONSELIST.containing_type = _GETRESPONSE
_GETRESPONSE.fields_by_name[
'response'].message_type = _GETRESPONSE_RESPONSELIST
_SETREQUEST_CONFIGOPERATIONLIST.fields_by_name[
'operation'].enum_type = _SETCONFIGCOMMANDS
_SETREQUEST_CONFIGOPERATIONLIST.containing_type = _SETREQUEST
_SETREQUEST.fields_by_name['encoding'].enum_type = _OPENCONFIGDATAENCODINGTYPES
_SETREQUEST.fields_by_name[
'config_operation'].message_type = _SETREQUEST_CONFIGOPERATIONLIST
_SETRESPONSE_RESPONSELIST.fields_by_name[
'response_code'].enum_type = _OPENCONFIGRPCRESPONSETYPES
_SETRESPONSE_RESPONSELIST.containing_type = _SETRESPONSE
_SETRESPONSE.fields_by_name[
'response'].message_type = _SETRESPONSE_RESPONSELIST
DESCRIPTOR.message_types_by_name[
'GetDataEncodingsRequest'] = _GETDATAENCODINGSREQUEST
DESCRIPTOR.message_types_by_name[
'GetDataEncodingsResponse'] = _GETDATAENCODINGSRESPONSE
DESCRIPTOR.message_types_by_name[
'SetDataEncodingRequest'] = _SETDATAENCODINGREQUEST
DESCRIPTOR.message_types_by_name[
'SetDataEncodingResponse'] = _SETDATAENCODINGRESPONSE
DESCRIPTOR.message_types_by_name['GetModelsRequest'] = _GETMODELSREQUEST
DESCRIPTOR.message_types_by_name['Model'] = _MODEL
DESCRIPTOR.message_types_by_name['GetModelsResponse'] = _GETMODELSRESPONSE
DESCRIPTOR.message_types_by_name['GetRequestList'] = _GETREQUESTLIST
DESCRIPTOR.message_types_by_name['GetRequest'] = _GETREQUEST
DESCRIPTOR.message_types_by_name['GetResponse'] = _GETRESPONSE
DESCRIPTOR.message_types_by_name['SetRequest'] = _SETREQUEST
DESCRIPTOR.message_types_by_name['SetResponse'] = _SETRESPONSE
DESCRIPTOR.enum_types_by_name[
'OpenConfigDataEncodingTypes'] = _OPENCONFIGDATAENCODINGTYPES
DESCRIPTOR.enum_types_by_name['GetDataCommands'] = _GETDATACOMMANDS
DESCRIPTOR.enum_types_by_name['SetConfigCommands'] = _SETCONFIGCOMMANDS
DESCRIPTOR.enum_types_by_name[
'OpenConfigRpcResponseTypes'] = _OPENCONFIGRPCRESPONSETYPES
GetDataEncodingsRequest = _reflection.GeneratedProtocolMessageType('GetDataEncodingsRequest', (_message.Message,), dict(
DESCRIPTOR=_GETDATAENCODINGSREQUEST,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetDataEncodingsRequest)
))
_sym_db.RegisterMessage(GetDataEncodingsRequest)
GetDataEncodingsResponse = _reflection.GeneratedProtocolMessageType('GetDataEncodingsResponse', (_message.Message,), dict(
DESCRIPTOR=_GETDATAENCODINGSRESPONSE,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetDataEncodingsResponse)
))
_sym_db.RegisterMessage(GetDataEncodingsResponse)
SetDataEncodingRequest = _reflection.GeneratedProtocolMessageType('SetDataEncodingRequest', (_message.Message,), dict(
DESCRIPTOR=_SETDATAENCODINGREQUEST,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetDataEncodingRequest)
))
_sym_db.RegisterMessage(SetDataEncodingRequest)
SetDataEncodingResponse = _reflection.GeneratedProtocolMessageType('SetDataEncodingResponse', (_message.Message,), dict(
DESCRIPTOR=_SETDATAENCODINGRESPONSE,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetDataEncodingResponse)
))
_sym_db.RegisterMessage(SetDataEncodingResponse)
GetModelsRequest = _reflection.GeneratedProtocolMessageType('GetModelsRequest', (_message.Message,), dict(
DESCRIPTOR=_GETMODELSREQUEST,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetModelsRequest)
))
_sym_db.RegisterMessage(GetModelsRequest)
Model = _reflection.GeneratedProtocolMessageType('Model', (_message.Message,), dict(
DESCRIPTOR=_MODEL,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.Model)
))
_sym_db.RegisterMessage(Model)
GetModelsResponse = _reflection.GeneratedProtocolMessageType('GetModelsResponse', (_message.Message,), dict(
DESCRIPTOR=_GETMODELSRESPONSE,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetModelsResponse)
))
_sym_db.RegisterMessage(GetModelsResponse)
GetRequestList = _reflection.GeneratedProtocolMessageType('GetRequestList', (_message.Message,), dict(
DESCRIPTOR=_GETREQUESTLIST,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetRequestList)
))
_sym_db.RegisterMessage(GetRequestList)
GetRequest = _reflection.GeneratedProtocolMessageType('GetRequest', (_message.Message,), dict(
DESCRIPTOR=_GETREQUEST,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetRequest)
))
_sym_db.RegisterMessage(GetRequest)
GetResponse = _reflection.GeneratedProtocolMessageType('GetResponse', (_message.Message,), dict(
ResponseList=_reflection.GeneratedProtocolMessageType('ResponseList', (_message.Message,), dict(
DESCRIPTOR=_GETRESPONSE_RESPONSELIST,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetResponse.ResponseList)
)),
DESCRIPTOR=_GETRESPONSE,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetResponse)
))
_sym_db.RegisterMessage(GetResponse)
_sym_db.RegisterMessage(GetResponse.ResponseList)
SetRequest = _reflection.GeneratedProtocolMessageType('SetRequest', (_message.Message,), dict(
ConfigOperationList=_reflection.GeneratedProtocolMessageType('ConfigOperationList', (_message.Message,), dict(
DESCRIPTOR=_SETREQUEST_CONFIGOPERATIONLIST,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetRequest.ConfigOperationList)
)),
DESCRIPTOR=_SETREQUEST,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetRequest)
))
_sym_db.RegisterMessage(SetRequest)
_sym_db.RegisterMessage(SetRequest.ConfigOperationList)
SetResponse = _reflection.GeneratedProtocolMessageType('SetResponse', (_message.Message,), dict(
ResponseList=_reflection.GeneratedProtocolMessageType('ResponseList', (_message.Message,), dict(
DESCRIPTOR=_SETRESPONSE_RESPONSELIST,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetResponse.ResponseList)
)),
DESCRIPTOR=_SETRESPONSE,
__module__='openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetResponse)
))
_sym_db.RegisterMessage(SetResponse)
_sym_db.RegisterMessage(SetResponse.ResponseList)
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
class OpenconfigRpcApiStub(object):
"""
MGD Service Definitions
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetDataEncodings = channel.unary_unary(
'/openconfig.OpenconfigRpcApi/GetDataEncodings',
request_serializer=GetDataEncodingsRequest.SerializeToString,
response_deserializer=GetDataEncodingsResponse.FromString,
)
self.SetDataEncoding = channel.unary_unary(
'/openconfig.OpenconfigRpcApi/SetDataEncoding',
request_serializer=SetDataEncodingRequest.SerializeToString,
response_deserializer=SetDataEncodingResponse.FromString,
)
self.GetModels = channel.unary_unary(
'/openconfig.OpenconfigRpcApi/GetModels',
request_serializer=GetModelsRequest.SerializeToString,
response_deserializer=GetModelsResponse.FromString,
)
self.Get = channel.unary_unary(
'/openconfig.OpenconfigRpcApi/Get',
request_serializer=GetRequest.SerializeToString,
response_deserializer=GetResponse.FromString,
)
self.Set = channel.unary_unary(
'/openconfig.OpenconfigRpcApi/Set',
request_serializer=SetRequest.SerializeToString,
response_deserializer=SetResponse.FromString,
)
class OpenconfigRpcApiServicer(object):
"""
MGD Service Definitions
"""
def GetDataEncodings(self, request, context):
"""
Return the set of data encodings supported by the device for
configuration and telemetry data modeled in YANG
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetDataEncoding(self, request, context):
"""
Select and set one of the data encodings returned by
getDataEncodings. This RPC sets the global encoding
serialization for all data exchanged with the target
device. The global data encoding may be optionally overridden
by setting the encoding for an individual RPC if supported by the target
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetModels(self, request, context):
"""
Returns a repeated structure of supported data models
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Get(self, request, context):
"""
Requests data from the network device. The get RPC
request should include a subcommand to indicate the type of
data desired by the requestor. Supported types of data
include: configuration data (config: true nodes in the schema)
operational state data (config: false nodes)
derived operational state only (config: false nodes that
represent derived operational state, excluding config: false
nodes that represent applied configuration)
all data (config: true and config: false nodes)
A get RPC can contain multiple requests for data. Each
request includes a path specifying a subtree in the data
model, and a command to indicate which type of data should be returned
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Set(self, request, context):
"""
Modify configuration on the target device. The set
RPC accepts a combination of commands, each with an
associated path specification to indicate which data should be modified.
The commands in a set request should be fully validated and accepted by
the device before a response is returned. The
application of the configuration commands may or may not be
complete when the command returns. The NMS is expected to be
able to track the application of the configuration using the
operational state data in the telemetry stream, or by
retrieving the state data using an RPC
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OpenconfigRpcApiServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetDataEncodings': grpc.unary_unary_rpc_method_handler(
servicer.GetDataEncodings,
request_deserializer=GetDataEncodingsRequest.FromString,
response_serializer=GetDataEncodingsResponse.SerializeToString,
),
'SetDataEncoding': grpc.unary_unary_rpc_method_handler(
servicer.SetDataEncoding,
request_deserializer=SetDataEncodingRequest.FromString,
response_serializer=SetDataEncodingResponse.SerializeToString,
),
'GetModels': grpc.unary_unary_rpc_method_handler(
servicer.GetModels,
request_deserializer=GetModelsRequest.FromString,
response_serializer=GetModelsResponse.SerializeToString,
),
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=GetRequest.FromString,
response_serializer=GetResponse.SerializeToString,
),
'Set': grpc.unary_unary_rpc_method_handler(
servicer.Set,
request_deserializer=SetRequest.FromString,
response_serializer=SetResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'openconfig.OpenconfigRpcApi', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaOpenconfigRpcApiServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""
MGD Service Definitions
"""
def GetDataEncodings(self, request, context):
"""
Return the set of data encodings supported by the device for
configuration and telemetry data modeled in YANG
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def SetDataEncoding(self, request, context):
"""
Select and set one of the data encodings returned by
getDataEncodings. This RPC sets the global encoding
serialization for all data exchanged with the target
device. The global data encoding may be optionally overridden
by setting the encoding for an individual RPC if supported by the target
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def GetModels(self, request, context):
"""
Returns a repeated structure of supported data models
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Get(self, request, context):
"""
Requests data from the network device. The get RPC
request should include a subcommand to indicate the type of
data desired by the requestor. Supported types of data
include: configuration data (config: true nodes in the schema)
operational state data (config: false nodes)
derived operational state only (config: false nodes that
represent derived operational state, excluding config: false
nodes that represent applied configuration)
all data (config: true and config: false nodes)
A get RPC can contain multiple requests for data. Each
request includes a path specifying a subtree in the data
model, and a command to indicate which type of data should be returned
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Set(self, request, context):
"""
Modify configuration on the target device. The set
RPC accepts a combination of commands, each with an
associated path specification to indicate which data should be modified.
The commands in a set request should be fully validated and accepted by
the device before a response is returned. The
application of the configuration commands may or may not be
complete when the command returns. The NMS is expected to be
able to track the application of the configuration using the
operational state data in the telemetry stream, or by
retrieving the state data using an RPC
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaOpenconfigRpcApiStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""
MGD Service Definitions
"""
def GetDataEncodings(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""
Return the set of data encodings supported by the device for
configuration and telemetry data modeled in YANG
"""
raise NotImplementedError()
GetDataEncodings.future = None
def SetDataEncoding(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""
Select and set one of the data encodings returned by
getDataEncodings. This RPC sets the global encoding
serialization for all data exchanged with the target
device. The global data encoding may be optionally overridden
by setting the encoding for an individual RPC if supported by the target
"""
raise NotImplementedError()
SetDataEncoding.future = None
def GetModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""
Returns a repeated structure of supported data models
"""
raise NotImplementedError()
GetModels.future = None
def Get(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""
Requests data from the network device. The get RPC
request should include a subcommand to indicate the type of
data desired by the requestor. Supported types of data
include: configuration data (config: true nodes in the schema)
operational state data (config: false nodes)
derived operational state only (config: false nodes that
represent derived operational state, excluding config: false
nodes that represent applied configuration)
all data (config: true and config: false nodes)
A get RPC can contain multiple requests for data. Each
request includes a path specifying a subtree in the data
model, and a command to indicate which type of data should be returned
"""
raise NotImplementedError()
Get.future = None
def Set(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""
Modify configuration on the target device. The set
RPC accepts a combination of commands, each with an
associated path specification to indicate which data should be modified.
The commands in a set request should be fully validated and accepted by
the device before a response is returned. The
application of the configuration commands may or may not be
complete when the command returns. The NMS is expected to be
able to track the application of the configuration using the
operational state data in the telemetry stream, or by
retrieving the state data using an RPC
"""
raise NotImplementedError()
Set.future = None
def beta_create_OpenconfigRpcApi_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('openconfig.OpenconfigRpcApi', 'Get'): GetRequest.FromString,
('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): GetDataEncodingsRequest.FromString,
('openconfig.OpenconfigRpcApi', 'GetModels'): GetModelsRequest.FromString,
('openconfig.OpenconfigRpcApi', 'Set'): SetRequest.FromString,
('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): SetDataEncodingRequest.FromString,
}
response_serializers = {
('openconfig.OpenconfigRpcApi', 'Get'): GetResponse.SerializeToString,
('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): GetDataEncodingsResponse.SerializeToString,
('openconfig.OpenconfigRpcApi', 'GetModels'): GetModelsResponse.SerializeToString,
('openconfig.OpenconfigRpcApi', 'Set'): SetResponse.SerializeToString,
('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): SetDataEncodingResponse.SerializeToString,
}
method_implementations = {
('openconfig.OpenconfigRpcApi', 'Get'): face_utilities.unary_unary_inline(servicer.Get),
('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): face_utilities.unary_unary_inline(servicer.GetDataEncodings),
('openconfig.OpenconfigRpcApi', 'GetModels'): face_utilities.unary_unary_inline(servicer.GetModels),
('openconfig.OpenconfigRpcApi', 'Set'): face_utilities.unary_unary_inline(servicer.Set),
('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): face_utilities.unary_unary_inline(servicer.SetDataEncoding),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers,
thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_OpenconfigRpcApi_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('openconfig.OpenconfigRpcApi', 'Get'): GetRequest.SerializeToString,
('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): GetDataEncodingsRequest.SerializeToString,
('openconfig.OpenconfigRpcApi', 'GetModels'): GetModelsRequest.SerializeToString,
('openconfig.OpenconfigRpcApi', 'Set'): SetRequest.SerializeToString,
('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): SetDataEncodingRequest.SerializeToString,
}
response_deserializers = {
('openconfig.OpenconfigRpcApi', 'Get'): GetResponse.FromString,
('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): GetDataEncodingsResponse.FromString,
('openconfig.OpenconfigRpcApi', 'GetModels'): GetModelsResponse.FromString,
('openconfig.OpenconfigRpcApi', 'Set'): SetResponse.FromString,
('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): SetDataEncodingResponse.FromString,
}
cardinalities = {
'Get': cardinality.Cardinality.UNARY_UNARY,
'GetDataEncodings': cardinality.Cardinality.UNARY_UNARY,
'GetModels': cardinality.Cardinality.UNARY_UNARY,
'Set': cardinality.Cardinality.UNARY_UNARY,
'SetDataEncoding': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers,
response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'openconfig.OpenconfigRpcApi', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
WSCU/crazyflie_ros | lib/cflib/crtp/exceptions.py | 31 | 1605 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Exception raised when the URI is not intended for the current driver
(e.g. radio:// given to the serial driver).
It means that another driver could handle the URI;
it does NOT mean that the URI itself is good or bad.
"""
__author__ = 'Bitcraze AB'
__all__ = ['WrongUriType', 'CommunicationException']
class WrongUriType (Exception):
""" Wrong type of URI for this interface """
pass
class CommunicationException (Exception):
""" Communication problem when communicating with a Crazyflie """
pass
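# Illustrative sketch (not part of the original module): how a link driver
# might use these exceptions. The driver class and URI scheme below are
# hypothetical, shown for illustration only.
#
#     class SerialDriver(object):
#         def connect(self, uri):
#             if not uri.startswith('serial://'):
#                 # Some other driver may handle this URI; it is not
#                 # necessarily malformed.
#                 raise WrongUriType('Not a serial URI')
#             # ... and on link failures while talking to the Crazyflie:
#             # raise CommunicationException('Lost connection')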
| gpl-2.0 |
WoLpH/CouchPotatoServer | couchpotato/core/notifications/nmj/main.py | 10 | 3675 | from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import re
import telnetlib
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
log = CPLog(__name__)
class NMJ(Notification):
def __init__(self):
addEvent('renamer.after', self.addToLibrary)
addApiView(self.testNotifyName(), self.test)
addApiView('notify.nmj.auto_config', self.autoConfig)
def autoConfig(self, host = 'localhost', **kwargs):
mount = ''
try:
terminal = telnetlib.Telnet(host)
except Exception:
log.error('Warning: unable to get a telnet session to %s', host)
return self.failed()
log.debug('Connected to %s via telnet', host)
terminal.read_until('sh-3.00# ')
terminal.write('cat /tmp/source\n')
terminal.write('cat /tmp/netshare\n')
terminal.write('exit\n')
tnoutput = terminal.read_all()
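        # The echoed telnet session contains the database path (printed by
        # 'cat /tmp/source') followed by the share/device name (printed by
        # 'cat /tmp/netshare'); the regex below captures both, stopping at
        # the echoed 'cat /tmp/netshare' command.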
match = re.search(r'(.+\.db)\r\n?(.+)(?=sh-3.00# cat /tmp/netshare)', tnoutput)
if match:
database = match.group(1)
device = match.group(2)
log.info('Found NMJ database %s on device %s', (database, device))
else:
log.error('Could not get current NMJ database on %s, NMJ is probably not running!', host)
return self.failed()
if device.startswith('NETWORK_SHARE/'):
match = re.search('.*(?=\r\n?%s)' % (re.escape(device[14:])), tnoutput)
if match:
mount = match.group().replace('127.0.0.1', host)
log.info('Found mounting url on the Popcorn Hour in configuration: %s', mount)
else:
log.error('Detected a network share on the Popcorn Hour, but could not get the mounting url')
return self.failed()
return {
'success': True,
'database': database,
'mount': mount,
}
def addToLibrary(self, message = None, group = None):
if self.isDisabled(): return
if not group: group = {}
host = self.conf('host')
mount = self.conf('mount')
database = self.conf('database')
if mount:
log.debug('Try to mount network drive via url: %s', mount)
try:
self.urlopen(mount)
except:
return False
params = {
'arg0': 'scanner_start',
'arg1': database,
'arg2': 'background',
'arg3': '',
}
params = tryUrlencode(params)
UPDATE_URL = 'http://%(host)s:8008/metadata_database?%(params)s'
updateUrl = UPDATE_URL % {'host': host, 'params': params}
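        # The resulting request looks roughly like (host below is just an
        # example, and the parameter order depends on dict ordering):
        #   http://popcorn-hour:8008/metadata_database?arg0=scanner_start&arg1=<db>&arg2=background&arg3=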
try:
response = self.urlopen(updateUrl)
except:
return False
try:
et = etree.fromstring(response)
result = et.findtext('returnValue')
except SyntaxError, e:
log.error('Unable to parse XML returned from the Popcorn Hour: %s', e)
return False
if int(result) > 0:
log.error('Popcorn Hour returned an errorcode: %s', result)
return False
else:
log.info('NMJ started background scan')
return True
def failed(self):
return {
'success': False
}
def test(self, **kwargs):
return {
'success': self.addToLibrary()
}
| gpl-3.0 |
yonglehou/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
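# The precision (inverse covariance) matrix encodes conditional dependencies;
# dividing entry (i, j) by sqrt(precision[i, i] * precision[j, j]) gives, up
# to sign, the partial correlation between quotes i and j.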
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
miguel-negrao/supercollider | external_libraries/simplejson-2.3.2/__init__.py | 44 | 18618 | r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
...     raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.3.2'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict',
]
__author__ = 'Bob Ippolito <[email protected]>'
from decimal import Decimal
from .decoder import JSONDecoder, JSONDecodeError
from .encoder import JSONEncoder
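# collections.OrderedDict only exists on Python 2.7+; older interpreters fall
# back to the pure-Python ordered_dict module bundled with simplejson.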
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from ._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
**kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
**kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
    If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
    Note that currently only encodings that are a superset of ASCII work;
    strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
    Note that currently only encodings that are a superset of ASCII work;
    strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and not use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
if use_decimal:
if parse_float is not None:
raise TypeError("use_decimal=True implies parse_float=Decimal")
kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
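# Internal helper, used mainly by the test suite: switch the package between
# the C accelerated implementations (_speedups) and the pure-Python ones, and
# rebuild the cached default encoder/decoder so the change takes effect.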
def _toggle_speedups(enabled):
import simplejson.decoder as dec
import simplejson.encoder as enc
import simplejson.scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
enc.py_encode_basestring_ascii)
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
else:
dec.scanstring = dec.py_scanstring
enc.c_make_encoder = None
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
scan.make_scanner = scan.py_make_scanner
dec.make_scanner = scan.make_scanner
global _default_decoder
_default_decoder = JSONDecoder(
encoding=None,
object_hook=None,
object_pairs_hook=None,
)
global _default_encoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
| gpl-3.0 |
ye11ow/phantomjs | src/breakpad/src/tools/gyp/test/library/gyptest-static.py | 430 | 2241 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple build of a "Hello, world!" program with static libraries,
including verifying that libraries are rebuilt correctly when functions
move between libraries.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('library.gyp',
'-Dlibrary=static_library',
'-Dmoveable_function=lib1',
chdir='src')
test.relocate('src', 'relocate/src')
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib1_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.run_gyp('library.gyp',
'-Dlibrary=static_library',
'-Dmoveable_function=lib2',
chdir='relocate/src')
# Update program.c to force a rebuild.
test.sleep()
contents = test.read('relocate/src/program.c')
contents = contents.replace('Hello', 'Hello again')
test.write('relocate/src/program.c', contents)
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello again from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib2_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.run_gyp('library.gyp',
'-Dlibrary=static_library',
'-Dmoveable_function=lib1',
chdir='relocate/src')
# Update program.c and lib2.c to force a rebuild.
test.sleep()
contents = test.read('relocate/src/program.c')
contents = contents.replace('again', 'again again')
test.write('relocate/src/program.c', contents)
# TODO(sgk): we have to force a rebuild of lib2 so that it weeds out
# the "moved" module. This should be done in gyp by adding a dependency
# on the generated .vcproj file itself.
test.touch('relocate/src/lib2.c')
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello again again from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib1_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.pass_test()
| bsd-3-clause |
phoebe-project/phoebe2-docs | 2.1/examples/minimal_contact_binary.py | 1 | 5694 | #!/usr/bin/env python
# coding: utf-8
# Minimal Contact Binary System
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
# Here we'll initialize a default binary, but ask for it to be created as a contact system.
# In[3]:
b_cb = phoebe.default_binary(contact_binary=True)
# We'll compare this to the default detached binary
# In[4]:
b_detached = phoebe.default_binary()
# Hierarchy
# -------------
# Let's first look at the hierarchy of the default detached binary, and then compare that to the hierarchy of the overcontact system
# In[5]:
print b_detached.hierarchy
# In[6]:
print b_cb.hierarchy
# As you can see, the overcontact system has an additional "component" with method "envelope" and component label "contact_envelope".
#
# Next let's look at the parameters in the envelope and star components. You can see that most of the parameters in the envelope class are constrained, while the equivalent radius of the primary is unconstrained. The value of the primary equivalent radius constrains the potential and fillout factor of the envelope, as well as the equivalent radius of the secondary.
# In[7]:
print b_cb.filter(component='contact_envelope', kind='envelope', context='component')
# In[8]:
print b_cb.filter(component='primary', kind='star', context='component')
# In[9]:
b_cb['requiv@primary'] = 1.5
# In[10]:
b_cb['pot@contact_envelope@component']
# In[11]:
b_cb['fillout_factor@contact_envelope@component']
# In[12]:
b_cb['requiv@secondary@component']
# Now, of course, if we didn't originally know we wanted a contact binary and built the default detached system, we could still turn it into a contact binary just by changing the hierarchy.
# In[13]:
b_detached.add_component('envelope', component='contact_envelope')
# In[14]:
hier = phoebe.hierarchy.binaryorbit(b_detached['binary'], b_detached['primary'], b_detached['secondary'], b_detached['contact_envelope'])
print hier
# In[15]:
b_detached.set_hierarchy(hier)
# In[16]:
print b_detached.hierarchy
# However, since our system was detached, the system is not overflowing, and therefore doesn't pass system checks
# In[17]:
b_detached.run_checks()
# And because of this, the potential and requiv@secondary constraints cannot be computed
# In[18]:
b_detached['pot@component']
# In[19]:
b_detached['requiv@secondary@component']
# Likewise, we can make a contact system detached again simply by removing the envelope from the hierarchy. The parameters themselves will still exist (unless you remove them), so you can always just change the hierarchy again to change back to an overcontact system.
# In[20]:
hier = phoebe.hierarchy.binaryorbit(b_detached['binary'], b_detached['primary'], b_detached['secondary'])
print hier
# In[21]:
b_detached.set_hierarchy(hier)
# In[22]:
print b_detached.hierarchy
# Although the constraints have been removed, PHOEBE has lost the original value of the secondary radius (because of the failed contact constraints), so we'll have to reset that here as well.
# In[23]:
b_detached['requiv@secondary'] = 1.0
# Adding Datasets
# ---------------------
# In[24]:
b_cb.add_dataset('mesh', times=[0], dataset='mesh01')
# In[25]:
b_cb.add_dataset('orb', times=np.linspace(0,1,201), dataset='orb01')
# In[26]:
b_cb.add_dataset('lc', times=np.linspace(0,1,21), dataset='lc01')
# In[27]:
b_cb.add_dataset('rv', times=np.linspace(0,1,21), dataset='rv01')
# For comparison, we'll do the same to our detached system
# In[28]:
b_detached.add_dataset('mesh', times=[0], dataset='mesh01')
# In[29]:
b_detached.add_dataset('orb', times=np.linspace(0,1,201), dataset='orb01')
# In[30]:
b_detached.add_dataset('lc', times=np.linspace(0,1,21), dataset='lc01')
# In[31]:
b_detached.add_dataset('rv', times=np.linspace(0,1,21), dataset='rv01')
# Running Compute
# --------------------
# In[32]:
b_cb.run_compute(irrad_method='none')
# In[33]:
b_detached.run_compute(irrad_method='none')
# Synthetics
# ------------------
# To ensure compatibility with computing synthetics in detached and semi-detached systems in Phoebe, the synthetic meshes for our overcontact system are attached to each component separately, instead of to the contact envelope.
# In[34]:
print b_cb['mesh01@model'].components
# In[35]:
print b_detached['mesh01@model'].components
# Plotting
# ---------------
# ### Meshes
# In[36]:
afig, mplfig = b_cb['mesh01@model'].plot(x='ws', show=True)
# In[37]:
afig, mplfig = b_detached['mesh01@model'].plot(x='ws', show=True)
# ### Orbits
# In[38]:
afig, mplfig = b_cb['orb01@model'].plot(x='ws',show=True)
# In[39]:
afig, mplfig = b_detached['orb01@model'].plot(x='ws',show=True)
# ### Light Curves
# In[40]:
afig, mplfig = b_cb['lc01@model'].plot(show=True)
# In[41]:
afig, mplfig = b_detached['lc01@model'].plot(show=True)
# ### RVs
# In[42]:
afig, mplfig = b_cb['rv01@model'].plot(show=True)
# In[43]:
afig, mplfig = b_detached['rv01@model'].plot(show=True)
# In[ ]:
| gpl-3.0 |
caot/intellij-community | python/lib/Lib/site-packages/django/conf/locale/id/formats.py | 78 | 1855 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G:i:s"
TIME_FORMAT = 'G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d-%m-%y', '%d/%m/%y', # '25-10-09' , 25/10/09'
'%d-%m-%Y', '%d/%m/%Y', # '25-10-2009' , 25/10/2009'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d-%m-%Y %H:%M:%S', # '25-10-2009 14:30:59'
'%d-%m-%Y %H:%M', # '25-10-2009 14:30'
'%d-%m-%Y', # '25-10-2009'
'%d-%m-%y %H:%M:%S', # '25-10-09' 14:30:59'
'%d-%m-%y %H:%M', # '25-10-09' 14:30'
'%d-%m-%y', # '25-10-09''
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
'%m/%d/%Y %H:%M:%S', # '25/10/2009 14:30:59'
'%m/%d/%Y %H:%M', # '25/10/2009 14:30'
'%m/%d/%Y', # '10/25/2009'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| apache-2.0 |
erkrishna9/odoo | openerp/addons/base/tests/test_ir_actions.py | 48 | 19830 | import unittest2
from openerp.osv.orm import except_orm
import openerp.tests.common as common
from openerp.tools import mute_logger
class TestServerActionsBase(common.TransactionCase):
def setUp(self):
super(TestServerActionsBase, self).setUp()
cr, uid = self.cr, self.uid
# Models
self.ir_actions_server = self.registry('ir.actions.server')
self.ir_actions_client = self.registry('ir.actions.client')
self.ir_values = self.registry('ir.values')
self.ir_model = self.registry('ir.model')
self.ir_model_fields = self.registry('ir.model.fields')
self.res_partner = self.registry('res.partner')
self.res_country = self.registry('res.country')
# Data on which we will run the server action
self.test_country_id = self.res_country.create(cr, uid, {
'name': 'TestingCountry',
'code': 'TY',
'address_format': 'SuperFormat',
})
self.test_country = self.res_country.browse(cr, uid, self.test_country_id)
self.test_partner_id = self.res_partner.create(cr, uid, {
'name': 'TestingPartner',
'city': 'OrigCity',
'country_id': self.test_country_id,
})
self.test_partner = self.res_partner.browse(cr, uid, self.test_partner_id)
self.context = {
'active_id': self.test_partner_id,
'active_model': 'res.partner',
}
# Model data
self.res_partner_model_id = self.ir_model.search(cr, uid, [('model', '=', 'res.partner')])[0]
self.res_partner_name_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'name')])[0]
self.res_partner_city_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'city')])[0]
self.res_partner_country_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'country_id')])[0]
self.res_partner_parent_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'parent_id')])[0]
self.res_country_model_id = self.ir_model.search(cr, uid, [('model', '=', 'res.country')])[0]
self.res_country_name_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.country'), ('name', '=', 'name')])[0]
self.res_country_code_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.country'), ('name', '=', 'code')])[0]
# create server action to
self.act_id = self.ir_actions_server.create(cr, uid, {
'name': 'TestAction',
'condition': 'True',
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'obj.write({"comment": "MyComment"})',
})
class TestServerActions(TestServerActionsBase):
def test_00_action(self):
cr, uid = self.cr, self.uid
# Do: eval 'True' condition
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: invalid condition check')
self.test_partner.write({'comment': False})
# Do: eval False condition, that should be considered as True (void = True)
self.ir_actions_server.write(cr, uid, [self.act_id], {'condition': False})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: invalid condition check')
# Do: create contextual action
self.ir_actions_server.create_action(cr, uid, [self.act_id])
# Test: ir_values created
ir_values_ids = self.ir_values.search(cr, uid, [('name', '=', 'Run TestAction')])
self.assertEqual(len(ir_values_ids), 1, 'ir_actions_server: create_action should have created an entry in ir_values')
ir_value = self.ir_values.browse(cr, uid, ir_values_ids[0])
self.assertEqual(ir_value.value, 'ir.actions.server,%s' % self.act_id, 'ir_actions_server: created ir_values should reference the server action')
self.assertEqual(ir_value.model, 'res.partner', 'ir_actions_server: created ir_values should be linked to the action base model')
# Do: remove contextual action
self.ir_actions_server.unlink_action(cr, uid, [self.act_id])
# Test: ir_values removed
ir_values_ids = self.ir_values.search(cr, uid, [('name', '=', 'Run TestAction')])
self.assertEqual(len(ir_values_ids), 0, 'ir_actions_server: unlink_action should remove the ir_values record')
def test_10_code(self):
cr, uid = self.cr, self.uid
self.ir_actions_server.write(cr, uid, self.act_id, {
'state': 'code',
'code': """partner_name = obj.name + '_code'
self.pool["res.partner"].create(cr, uid, {"name": partner_name}, context=context)
workflow"""
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: code server action correctly finished should return False')
pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner_code')])
self.assertEqual(len(pids), 1, 'ir_actions_server: 1 new partner should have been created')
def test_20_trigger(self):
cr, uid = self.cr, self.uid
# Data: code server action (at this point code-based actions should work)
act_id2 = self.ir_actions_server.create(cr, uid, {
'name': 'TestAction2',
'type': 'ir.actions.server',
'condition': 'True',
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'obj.write({"comment": "MyComment"})',
})
act_id3 = self.ir_actions_server.create(cr, uid, {
'name': 'TestAction3',
'type': 'ir.actions.server',
'condition': 'True',
'model_id': self.res_country_model_id,
'state': 'code',
'code': 'obj.write({"code": "ZZ"})',
})
# Data: create workflows
partner_wf_id = self.registry('workflow').create(cr, uid, {
'name': 'TestWorkflow',
'osv': 'res.partner',
'on_create': True,
})
partner_act1_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'PartnerStart',
'wkf_id': partner_wf_id,
'flow_start': True
})
partner_act2_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'PartnerTwo',
'wkf_id': partner_wf_id,
'kind': 'function',
'action': 'True',
'action_id': act_id2,
})
partner_trs1_id = self.registry('workflow.transition').create(cr, uid, {
'signal': 'partner_trans',
'act_from': partner_act1_id,
'act_to': partner_act2_id
})
country_wf_id = self.registry('workflow').create(cr, uid, {
'name': 'TestWorkflow',
'osv': 'res.country',
'on_create': True,
})
country_act1_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'CountryStart',
'wkf_id': country_wf_id,
'flow_start': True
})
country_act2_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'CountryTwo',
'wkf_id': country_wf_id,
'kind': 'function',
'action': 'True',
'action_id': act_id3,
})
country_trs1_id = self.registry('workflow.transition').create(cr, uid, {
'signal': 'country_trans',
'act_from': country_act1_id,
'act_to': country_act2_id
})
# Data: re-create country and partner to benefit from the workflows
self.test_country_id = self.res_country.create(cr, uid, {
'name': 'TestingCountry2',
'code': 'T2',
})
self.test_country = self.res_country.browse(cr, uid, self.test_country_id)
self.test_partner_id = self.res_partner.create(cr, uid, {
'name': 'TestingPartner2',
'country_id': self.test_country_id,
})
self.test_partner = self.res_partner.browse(cr, uid, self.test_partner_id)
self.context = {
'active_id': self.test_partner_id,
'active_model': 'res.partner',
}
# Run the action on partner object itself ('base')
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'trigger',
'use_relational_model': 'base',
'wkf_model_id': self.res_partner_model_id,
'wkf_model_name': 'res.partner',
'wkf_transition_id': partner_trs1_id,
})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: incorrect signal trigger')
# Run the action on related country object ('relational')
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_relational_model': 'relational',
'wkf_model_id': self.res_country_model_id,
'wkf_model_name': 'res.country',
'wkf_field_id': self.res_partner_country_field_id,
'wkf_transition_id': country_trs1_id,
})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_country.refresh()
self.assertEqual(self.test_country.code, 'ZZ', 'ir_actions_server: incorrect signal trigger')
# Clear workflow cache, otherwise openerp will try to create workflows even if it has been deleted
from openerp.workflow import clear_cache
clear_cache(cr, uid)
def test_30_client(self):
cr, uid = self.cr, self.uid
client_action_id = self.registry('ir.actions.client').create(cr, uid, {
'name': 'TestAction2',
'tag': 'Test',
})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'client_action',
'action_id': client_action_id,
})
res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertEqual(res['name'], 'TestAction2', 'ir_actions_server: incorrect return result for a client action')
def test_40_crud_create(self):
cr, uid = self.cr, self.uid
_city = 'TestCity'
_name = 'TestNew'
# Do: create a new record in the same model and link it
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'new',
'link_new_record': True,
'link_field_id': self.res_partner_parent_field_id,
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': _name}),
(0, 0, {'col1': self.res_partner_city_field_id, 'value': _city})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', _name)])
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, _city, 'ir_actions_server: TODO')
# Test: new partner linked
self.test_partner.refresh()
self.assertEqual(self.test_partner.parent_id.id, pids[0], 'ir_actions_server: TODO')
# Do: copy current record
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'copy_current',
'link_new_record': False,
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': 'TestCopyCurrent'}),
(0, 0, {'col1': self.res_partner_city_field_id, 'value': 'TestCity'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner (copy)')]) # currently res_partner overrides default['name'] whatever its value
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, 'TestCity', 'ir_actions_server: TODO')
self.assertEqual(partner.country_id.id, self.test_partner.country_id.id, 'ir_actions_server: TODO')
# Do: create a new record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'new_other',
'crud_model_id': self.res_country_model_id,
'link_new_record': False,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'obj.name', 'type': 'equation'}),
(0, 0, {'col1': self.res_country_code_field_id, 'value': 'obj.name[0:2]', 'type': 'equation'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'TestingPartner')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
country = self.res_country.browse(cr, uid, cids[0])
self.assertEqual(country.code, 'TE', 'ir_actions_server: TODO')
# Do: copy a record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'copy_other',
'crud_model_id': self.res_country_model_id,
'link_new_record': False,
'ref_object': 'res.country,%s' % self.test_country_id,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'NewCountry', 'type': 'value'}),
(0, 0, {'col1': self.res_country_code_field_id, 'value': 'NY', 'type': 'value'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'NewCountry')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
country = self.res_country.browse(cr, uid, cids[0])
self.assertEqual(country.code, 'NY', 'ir_actions_server: TODO')
self.assertEqual(country.address_format, 'SuperFormat', 'ir_actions_server: TODO')
def test_50_crud_write(self):
cr, uid = self.cr, self.uid
_name = 'TestNew'
# Do: create a new record in the same model and link it
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_write',
'use_write': 'current',
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': _name})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', _name)])
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, 'OrigCity', 'ir_actions_server: TODO')
# Do: copy current record
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_write': 'other',
'crud_model_id': self.res_country_model_id,
'ref_object': 'res.country,%s' % self.test_country_id,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'obj.name', 'type': 'equation'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'TestNew')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
# Do: copy a record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_write': 'expression',
'crud_model_id': self.res_country_model_id,
'write_expression': 'object.country_id',
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'NewCountry', 'type': 'value'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'NewCountry')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_60_multi(self):
cr, uid = self.cr, self.uid
# Data: 2 server actions that will be nested
act1_id = self.ir_actions_server.create(cr, uid, {
'name': 'Subaction1',
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'action = {"type": "ir.actions.act_window"}',
})
# Do: create a new record in the same model and link it
act2_id = self.ir_actions_server.create(cr, uid, {
'name': 'Subaction2',
'model_id': self.res_partner_model_id,
'state': 'object_create',
'use_create': 'copy_current',
})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'multi',
'child_ids': [(6, 0, [act1_id, act2_id])],
})
# Do: run the action
res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner (copy)')]) # currently res_partner overrides default['name'] whatever its value
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
# Test: action returned
self.assertEqual(res.get('type'), 'ir.actions.act_window', '')
# Test loops
self.assertRaises(except_orm, self.ir_actions_server.write, cr, uid, [self.act_id], {
'child_ids': [(6, 0, [self.act_id])]
})
if __name__ == '__main__':
unittest2.main()
| agpl-3.0 |
RichardLitt/wyrd-django-dev | tests/regressiontests/localflavor/pt/tests.py | 13 | 1059 | from __future__ import unicode_literals
from django.contrib.localflavor.pt.forms import PTZipCodeField, PTPhoneNumberField
from django.test import SimpleTestCase
class PTLocalFlavorTests(SimpleTestCase):
def test_PTZipCodeField(self):
error_format = ['Enter a zip code in the format XXXX-XXX.']
valid = {
'3030-034': '3030-034',
'1003456': '1003-456',
}
invalid = {
'2A200': error_format,
'980001': error_format,
}
self.assertFieldOutput(PTZipCodeField, valid, invalid)
def test_PTPhoneNumberField(self):
error_format = ['Phone numbers must have 9 digits, or start by + or 00.']
valid = {
'917845189': '917845189',
'91 784 5189': '917845189',
'+351 91 111': '+35191111',
'00351873': '00351873',
}
invalid = {
'91 784 51 8': error_format,
'091 456 987 1': error_format,
}
self.assertFieldOutput(PTPhoneNumberField, valid, invalid)
| bsd-3-clause |
c86j224s/snippet | Python_asyncio_binary_echo/pyclient2/Lib/site-packages/pip/_internal/operations/freeze.py | 8 | 10025 | from __future__ import absolute_import
import collections
import logging
import os
import re
import warnings
from pip._vendor import pkg_resources, six
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.pkg_resources import RequirementParseError
from pip._internal.exceptions import InstallationError
from pip._internal.req import InstallRequirement
from pip._internal.req.req_file import COMMENT_RE
from pip._internal.utils.deprecation import RemovedInPip11Warning
from pip._internal.utils.misc import (
dist_is_editable, get_installed_distributions,
)
logger = logging.getLogger(__name__)
def freeze(
requirement=None,
find_links=None, local_only=None, user_only=None, skip_regex=None,
isolated=False,
wheel_cache=None,
exclude_editable=False,
skip=()):
find_links = find_links or []
skip_match = None
if skip_regex:
skip_match = re.compile(skip_regex).search
dependency_links = []
for dist in pkg_resources.working_set:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt')
)
for link in find_links:
if '#egg=' in link:
dependency_links.append(link)
for link in find_links:
yield '-f %s' % link
installations = {}
for dist in get_installed_distributions(local_only=local_only,
skip=(),
user_only=user_only):
try:
req = FrozenRequirement.from_dist(
dist,
dependency_links
)
except RequirementParseError:
logger.warning(
"Could not parse requirement: %s",
dist.project_name
)
continue
if exclude_editable and req.editable:
continue
installations[req.name] = req
if requirement:
# the options that don't get turned into an InstallRequirement
# should only be emitted once, even if the same option appears in
# multiple requirements files, so keep track of what has already
# been emitted
emitted_options = set()
# keep track of which files a requirement is in so that we can
# give an accurate warning if a requirement appears multiple times.
req_files = collections.defaultdict(list)
for req_file_path in requirement:
with open(req_file_path) as req_file:
for line in req_file:
if (not line.strip() or
line.strip().startswith('#') or
(skip_match and skip_match(line)) or
line.startswith((
'-r', '--requirement',
'-Z', '--always-unzip',
'-f', '--find-links',
'-i', '--index-url',
'--pre',
'--trusted-host',
'--process-dependency-links',
'--extra-index-url'))):
line = line.rstrip()
if line not in emitted_options:
emitted_options.add(line)
yield line
continue
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
line_req = InstallRequirement.from_editable(
line,
isolated=isolated,
wheel_cache=wheel_cache,
)
else:
line_req = InstallRequirement.from_line(
COMMENT_RE.sub('', line).strip(),
isolated=isolated,
wheel_cache=wheel_cache,
)
if not line_req.name:
logger.info(
"Skipping line in requirement file [%s] because "
"it's not clear what it would install: %s",
req_file_path, line.strip(),
)
logger.info(
" (add #egg=PackageName to the URL to avoid"
" this warning)"
)
elif line_req.name not in installations:
# either it's not installed, or it is installed
# but has been processed already
if not req_files[line_req.name]:
logger.warning(
"Requirement file [%s] contains %s, but that "
"package is not installed",
req_file_path,
COMMENT_RE.sub('', line).strip(),
)
else:
req_files[line_req.name].append(req_file_path)
else:
yield str(installations[line_req.name]).rstrip()
del installations[line_req.name]
req_files[line_req.name].append(req_file_path)
# Warn about requirements that were included multiple times (in a
# single requirements file or in different requirements files).
for name, files in six.iteritems(req_files):
if len(files) > 1:
logger.warning("Requirement %s included multiple times [%s]",
name, ', '.join(sorted(set(files))))
yield(
'## The following requirements were added by '
'pip freeze:'
)
for installation in sorted(
installations.values(), key=lambda x: x.name.lower()):
if canonicalize_name(installation.name) not in skip:
yield str(installation).rstrip()
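# A minimal usage sketch (hypothetical driver code, not part of pip's
# public API): consuming the generator yields lines suitable for a
# requirements.txt file.
#
#     for line in freeze(local_only=True, skip=('pip', 'setuptools')):
#         print(line)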
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
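# For illustration: _rev_re matches revision-pinned versions such as
# '1.0-r1234' (capturing '1234'), while _date_re matches date-stamped
# versions such as '1.0-20101122' (capturing '20101122').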
@classmethod
def from_dist(cls, dist, dependency_links):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
from pip._internal.vcs import vcs, get_src_requirement
if dist_is_editable(dist) and vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location)
except InstallationError as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
req = None
if req is None:
logger.warning(
'Could not determine repository location of %s', location
)
comments.append(
'## !! Could not determine repository location'
)
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
(specs, dist)
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend().get_location(
dist,
dependency_links,
)
if not svn_location:
logger.warning(
'Warning: cannot find svn location for %s', req,
)
comments.append(
'## FIXME: could not find svn URL in dependency_links '
'for this package:'
)
else:
warnings.warn(
"SVN editable detection based on dependency links "
"will be dropped in the future.",
RemovedInPip11Warning,
)
comments.append(
'# Installing as editable to satisfy requirement %s:' %
req
)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (
svn_location,
rev,
cls.egg_name(dist)
)
return cls(dist.project_name, req, editable, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
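# For example (illustrative values only), a frozen editable VCS checkout
# renders roughly as '-e svn+http://example.com/svn/pkg@1234#egg=pkg',
# while a plain installation renders as 'pkg==1.0'.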
| apache-2.0 |
TeamSWAP/swap | external/pyinstaller/PyInstaller/cliutils/makespec.py | 10 | 1488 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Automatically build spec files containing a description of the project
"""
import optparse
import os
import PyInstaller.makespec
import PyInstaller.compat
import PyInstaller.log
from PyInstaller.utils import misc
def run():
misc.check_not_running_as_root()
p = optparse.OptionParser(
usage='python %prog [opts] <scriptname> [<scriptname> ...]'
)
PyInstaller.makespec.__add_options(p)
PyInstaller.log.__add_options(p)
PyInstaller.compat.__add_obsolete_options(p)
opts, args = p.parse_args()
PyInstaller.log.__process_options(p, opts)
# Split each pathex entry on the OS path separator
temppaths = opts.pathex[:]
opts.pathex = []
for p in temppaths:
opts.pathex.extend(p.split(os.pathsep))
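# For example (hypothetical input), '--paths=/libs:/more/libs' on a
# POSIX system, where os.pathsep is ':', expands to
# ['/libs', '/more/libs'].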
if not args:
p.error('Requires at least one scriptname file')
try:
name = PyInstaller.makespec.main(args, **opts.__dict__)
print 'wrote %s' % name
print 'now run pyinstaller.py to build the executable'
except KeyboardInterrupt:
raise SystemExit("Aborted by user request.")
| apache-2.0 |
Alexey-T/CudaText | app/py/sys/urllib3/util/timeout.py | 27 | 10003 | from __future__ import absolute_import
import time
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
# Use time.monotonic if available.
current_time = getattr(time, "monotonic", time.time)
class Timeout(object):
"""Timeout configuration.
Timeouts can be defined as a default for a pool:
.. code-block:: python
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool):
.. code-block:: python
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``:
.. code-block:: python
no_timeout = Timeout(connect=None, read=None)
response = http.request('GET', 'http://example.com/', timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: int, float, or None
:param connect:
The maximum amount of time (in seconds) to wait for a connection
attempt to a server to succeed. Omitting the parameter will default the
connect timeout to the system default, probably `the global default
timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: int, float, or None
:param read:
The maximum amount of time (in seconds) to wait between consecutive
read operations for a response from the server. Omitting the parameter
will default the read timeout to the system default, probably `the
global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: int, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, "connect")
self._read = self._validate_timeout(read, "read")
self.total = self._validate_timeout(total, "total")
self._start_connect = None
def __repr__(self):
return "%s(connect=%r, read=%r, total=%r)" % (
type(self).__name__,
self._connect,
self._read,
self.total,
)
# __str__ provided for backwards compatibility
__str__ = __repr__
@classmethod
def _validate_timeout(cls, value, name):
"""Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If it is a numeric value less than or equal to
zero, or the type is not an integer, float, or None.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
if isinstance(value, bool):
raise ValueError(
"Timeout cannot be a boolean value. It must "
"be an int, float or None."
)
try:
float(value)
except (TypeError, ValueError):
raise ValueError(
"Timeout value %s was %s, but it must be an "
"int, float or None." % (name, value)
)
try:
if value <= 0:
raise ValueError(
"Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than or equal to 0." % (name, value)
)
except TypeError:
# Python 3
raise ValueError(
"Timeout value %s was %s, but it must be an "
"int, float or None." % (name, value)
)
return value
@classmethod
def from_float(cls, timeout):
"""Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
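# Illustration: Timeout.from_float(5.0) is equivalent to
# Timeout(connect=5.0, read=5.0), so both socket phases share the value.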
def clone(self):
"""Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read, total=self.total)
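# Typical lifecycle (sketch): a pool-level Timeout is cloned for each
# request so the start_connect()/get_connect_duration() bookkeeping never
# leaks between requests.
#
#     per_request = pool_timeout.clone()
#     per_request.start_connect()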
def start_connect(self):
"""Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
"""Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time in seconds.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError(
"Can't get connect duration for timer that has not started."
)
return current_time() - self._start_connect
@property
def connect_timeout(self):
"""Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
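# For instance, Timeout(total=10.0) gives a connect timeout of 10.0,
# while Timeout(connect=2.0, total=10.0) gives min(2.0, 10.0) == 2.0.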
@property
def read_timeout(self):
"""Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (
self.total is not None
and self.total is not self.DEFAULT_TIMEOUT
and self._read is not None
and self._read is not self.DEFAULT_TIMEOUT
):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(), self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
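# A worked example of the arithmetic above (illustrative numbers): with
# Timeout(connect=2.0, read=7.0, total=10.0), if the connect phase took
# 4 seconds, read_timeout returns max(0, min(10.0 - 4.0, 7.0)) == 6.0.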
| mpl-2.0 |