repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
azureplus/hue | desktop/core/ext-py/Django-1.6.10/docs/_ext/djangodocs.py | 40 | 7686 | """
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from sphinx import addnodes, __version__ as sphinx_ver
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.writers.html import SmartyPantsHTMLTranslator
from sphinx.util.console import bold
from sphinx.util.compat import Directive
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
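# Illustrative only (this example is not part of the original file): for a
# signature such as "verbosity=VERBOSITY, -v", finditer() with this pattern
# yields the pairs ("verbosity", "=VERBOSITY") and ("-v", "").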
def setup(app):
app.add_crossref_type(
directivename = "setting",
rolename = "setting",
indextemplate = "pair: %s; setting",
)
app.add_crossref_type(
directivename = "templatetag",
rolename = "ttag",
indextemplate = "pair: %s; template tag"
)
app.add_crossref_type(
directivename = "templatefilter",
rolename = "tfilter",
indextemplate = "pair: %s; template filter"
)
app.add_crossref_type(
directivename = "fieldlookup",
rolename = "lookup",
indextemplate = "pair: %s; field lookup type",
)
app.add_description_unit(
directivename = "django-admin",
rolename = "djadmin",
indextemplate = "pair: %s; django-admin command",
parse_node = parse_django_admin_node,
)
app.add_description_unit(
directivename = "django-admin-option",
rolename = "djadminopt",
indextemplate = "pair: %s; django-admin command-line option",
parse_node = parse_django_adminopt_node,
)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
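# With the cross-reference types registered in setup() above, the docs can use
# markup along these lines (illustrative reST, not taken from this file):
#   .. setting:: DEBUG     -- defines a "setting" target and index entry
#   :setting:`DEBUG`       -- links back to that target from elsewhere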
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node['version'] = "Development version"
else:
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_desc_parameterlist(self, node):
self.body.append('(') # by default sphinx puts <big> around the "("
self.first_param = 1
self.optional_param_level = 0
self.param_separator = node.child_text_separator
self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
for c in node.children])
def depart_desc_parameterlist(self, node):
self.body.append(')')
if sphinx_ver < '1.0.8':
#
# Don't apply smartypants to literal blocks
#
def visit_literal_block(self, node):
self.no_smarty += 1
SmartyPantsHTMLTranslator.visit_literal_block(self, node)
def depart_literal_block(self, node):
SmartyPantsHTMLTranslator.depart_literal_block(self, node)
self.no_smarty -= 1
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'versionchanged': 'Changed in Django %s',
'versionadded': 'New in Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
version_text = self.version_text.get(node['type'])
if version_text:
title = "%s%s" % (
version_text % node['version'],
":" if len(node) else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
SmartyPantsHTMLTranslator.visit_section(self, node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env._django_curr_admin_command = command
title = "django-admin.py %s" % sig
signode += addnodes.desc_name(title, title)
return sig
def parse_django_adminopt_node(env, sig, signode):
"""A copy of sphinx.directives.CmdoptionDesc.parse_signature()"""
from sphinx.domains.std import option_desc_re
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not count:
for m in simple_option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
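# Illustrative only (the signature is an assumption, not from this file): for
# "--settings=SETTINGS, --pythonpath=PYTHONPATH" the loops above add a
# desc_name/desc_addname pair per option and return "--settings" as firstname.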
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super(DjangoStandaloneHTMLBuilder, self).finish()
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatetag" and l == "ref/templates/builtins"],
"tfilters": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatefilter" and l == "ref/templates/builtins"],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, 'w') as fp:
fp.write('var django_template_builtins = ')
json.dump(templatebuiltins, fp)
fp.write(';\n')
| apache-2.0 |
kantlove/flask-simple-page | Lib/site-packages/jinja2/meta.py | 336 | 4198 | # -*- coding: utf-8 -*-
"""
jinja2.meta
~~~~~~~~~~~
    This module implements various functions that expose information about
templates that might be interesting for various kinds of applications.
:copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
from jinja2._compat import string_types
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment):
CodeGenerator.__init__(self, environment, '<introspection>',
'<introspection>')
self.undeclared_identifiers = set()
def write(self, x):
"""Don't write."""
def pull_locals(self, frame):
"""Remember all undeclared identifiers."""
self.undeclared_identifiers.update(frame.identifiers.undeclared)
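# Minimal sketch of how the tracker is driven; it mirrors what
# find_undeclared_variables() below does:
#   codegen = TrackingCodeGenerator(ast.environment)
#   codegen.visit(ast)
#   names = codegen.undeclared_identifiers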
def find_undeclared_variables(ast):
"""Returns a set of all variables in the AST that will be looked up from
the context at runtime. Because at compile time it's not known which
variables will be used depending on the path the execution takes at
runtime, all variables are returned.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
>>> meta.find_undeclared_variables(ast) == set(['bar'])
True
.. admonition:: Implementation
Internally the code generator is used for finding undeclared variables.
This is good to know because the code generator might raise a
:exc:`TemplateAssertionError` during compilation and as a matter of
fact this function can currently raise that exception as well.
"""
codegen = TrackingCodeGenerator(ast.environment)
codegen.visit(ast)
return codegen.undeclared_identifiers
def find_referenced_templates(ast):
"""Finds all the referenced templates from the AST. This will return an
iterator over all the hardcoded template extensions, inclusions and
imports. If dynamic inheritance or inclusion is used, `None` will be
yielded.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
>>> list(meta.find_referenced_templates(ast))
['layout.html', None]
This function is useful for dependency tracking. For example if you want
to rebuild parts of the website after a layout template has changed.
"""
for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
nodes.Include)):
if not isinstance(node.template, nodes.Const):
# a tuple with some non consts in there
if isinstance(node.template, (nodes.Tuple, nodes.List)):
for template_name in node.template.items:
# something const, only yield the strings and ignore
# non-string consts that really just make no sense
if isinstance(template_name, nodes.Const):
if isinstance(template_name.value, string_types):
yield template_name.value
# something dynamic in there
else:
yield None
# something dynamic we don't know about here
else:
yield None
continue
# constant is a basestring, direct template name
if isinstance(node.template.value, string_types):
yield node.template.value
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
elif isinstance(node, nodes.Include) and \
isinstance(node.template.value, (tuple, list)):
for template_name in node.template.value:
if isinstance(template_name, string_types):
yield template_name
# something else we don't care about, we could warn here
else:
yield None
| mit |
shanot/imp | modules/pmi/benchmark/benchmark_loop_reconstruction.py | 1 | 4020 | import IMP
import IMP.core
import IMP.algebra
import IMP.atom
import IMP.container
import IMP.benchmark
import time
import sys
import os
import IMP.pmi.restraints.stereochemistry
import IMP.pmi.representation as representation
import IMP.pmi.tools as tools
import IMP.pmi.samplers as samplers
import IMP.pmi.output as output
IMP.setup_from_argv(sys.argv, "Loop reconstruction benchmark.")
IMP.set_log_level(IMP.SILENT)
# this benchmark should take 5 sec to initialize and 3 sec per MC loop, for a total of 35 sec.
# Redirect chatty PMI output so we can see benchmark output
old_stdout = sys.stdout
class DummyFile(object):
def write(self, txt):
pass
sys.stdout = DummyFile()
# input parameter
pdbfile = IMP.pmi.get_data_path("benchmark_starting_structure.pdb")
fastafile = IMP.pmi.get_data_path("benchmark_sequence.fasta")
fastids = tools.get_ids_from_fasta_file(fastafile)
missing_bead_size = 1
# Component pdbfile chainid rgb color fastafile sequence id
# in fastafile
data = [("chainA", pdbfile, "A", 0.00000000, (fastafile, 0)),
("chainB", pdbfile, "B", 0.50000000, (fastafile, 0))]
# create the representation
log_objects = []
optimizable_objects = []
sw = tools.Stopwatch()
log_objects.append(sw)
m = IMP.Model()
r = representation.Representation(m)
hierarchies = {}
for d in data:
component_name = d[0]
pdb_file = d[1]
chain_id = d[2]
color_id = d[3]
fasta_file = d[4][0]
fasta_file_id = d[4][1]
    # avoid adding a component with the same name
r.create_component(component_name,
color=color_id)
r.add_component_sequence(component_name,
fasta_file,
id=fastids[fasta_file_id])
hierarchies = r.autobuild_model(component_name,
pdb_file,
chain_id,
resolutions=[1, 10],
missingbeadsize=missing_bead_size)
r.show_component_table(component_name)
rbAB = r.set_rigid_bodies(["chainA", "chainB"])
r.set_floppy_bodies()
r.fix_rigid_bodies([rbAB])
r.setup_bonds()
log_objects.append(r)
listofexcludedpairs = []
lof = [(1, 12, "chainA"), (1, 12, "chainB"),
(294, 339, "chainA"), (294, 339, "chainB"),
(686, 701, "chainA"), (686, 701, "chainB"),
(454, 464, "chainA"), (454, 464, "chainB"),
(472, 486, "chainA"), (472, 486, "chainB"),
(814, 859, "chainA"), (814, 859, "chainB")]
# add bonds and angles
for l in lof:
rbr = IMP.pmi.restraints.stereochemistry.ResidueBondRestraint(r, l)
rbr.add_to_model()
listofexcludedpairs += rbr.get_excluded_pairs()
log_objects.append(rbr)
rar = IMP.pmi.restraints.stereochemistry.ResidueAngleRestraint(r, l)
rar.add_to_model()
listofexcludedpairs += rar.get_excluded_pairs()
log_objects.append(rar)
# add excluded volume
ev = IMP.pmi.restraints.stereochemistry.ExcludedVolumeSphere(
r,
resolution=10.0)
ev.add_excluded_particle_pairs(listofexcludedpairs)
ev.add_to_model()
log_objects.append(ev)
mc = samplers.MonteCarlo(m, [r], 1.0)
log_objects.append(mc)
start_time = time.clock()
# In debug mode things are way too slow to actually run MC
if IMP.get_check_level() < IMP.USAGE_AND_INTERNAL:
o = output.Output()
rmf = o.init_rmf("conformations.rmf3", [r.prot])
o.init_stat2("modeling.stat", log_objects)
o.write_rmf("conformations.rmf3")
o.init_pdb("conformations.pdb", r.prot)
for i in range(0, 10):
# print("Running job, frame number ", i)
mc.optimize(10)
o.write_rmf("conformations.rmf3")
o.write_pdbs()
o.write_stats2()
o.close_rmf("conformations.rmf3")
sys.stdout = old_stdout
IMP.benchmark.report("pmi loop", time.clock() - start_time, 3*10+5)
if IMP.get_check_level() < IMP.USAGE_AND_INTERNAL:
for output in ["conformations.pdb", "conformations.rmf3", "modeling.stat"]:
os.unlink(output)
| gpl-3.0 |
tungvx/deploy | .google_appengine/lib/webapp2/tests/extras_appengine_users_test.py | 24 | 2916 | # -*- coding: utf-8 -*-
import os
import webapp2
from webapp2_extras import users
import test_base
def set_current_user(email, user_id, is_admin=False):
os.environ['USER_EMAIL'] = email or ''
os.environ['USER_ID'] = user_id or ''
os.environ['USER_IS_ADMIN'] = '1' if is_admin else '0'
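# Illustrative call (values are placeholders): set_current_user('[email protected]',
# '[email protected]', is_admin=True) makes the dev users API stubs report an
# authenticated admin for the duration of a test.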
class LoginRequiredHandler(webapp2.RequestHandler):
@users.login_required
def get(self):
self.response.write('You are logged in.')
@users.login_required
def post(self):
self.response.write('You are logged in.')
class AdminRequiredHandler(webapp2.RequestHandler):
@users.admin_required
def get(self):
self.response.write('You are admin.')
@users.admin_required
def post(self):
self.response.write('You are admin.')
app = webapp2.WSGIApplication([
('/login_required', LoginRequiredHandler),
('/admin_required', AdminRequiredHandler),
])
class TestUsers(test_base.BaseTestCase):
def test_login_required_allowed(self):
set_current_user('[email protected]', '[email protected]')
req = webapp2.Request.blank('/login_required')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, 'You are logged in.')
def test_login_required_302(self):
req = webapp2.Request.blank('/login_required')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 302)
self.assertEqual(rsp.headers.get('Location'),
'https://www.google.com/accounts/Login?continue=http%3A//localhost/login_required')
def test_login_required_post(self):
req = webapp2.Request.blank('/login_required')
req.method = 'POST'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 400)
def test_admin_required_allowed(self):
set_current_user('[email protected]', '[email protected]', is_admin=True)
req = webapp2.Request.blank('/admin_required')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 200)
self.assertEqual(rsp.body, 'You are admin.')
def test_admin_required_not_admin(self):
set_current_user('[email protected]', '[email protected]')
req = webapp2.Request.blank('/admin_required')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 403)
def test_admin_required_302(self):
req = webapp2.Request.blank('/admin_required')
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 302)
self.assertEqual(rsp.headers.get('Location'),
'https://www.google.com/accounts/Login?continue=http%3A//localhost/admin_required')
def test_admin_required_post(self):
req = webapp2.Request.blank('/admin_required')
req.method = 'POST'
rsp = req.get_response(app)
self.assertEqual(rsp.status_int, 400)
if __name__ == '__main__':
test_base.main()
| apache-2.0 |
Jgarcia-IAS/SAT | openerp/addons-extra/odoo-pruebas/odoo-server/openerp/workflow/instance.py | 314 | 5594 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import workitem
from openerp.workflow.helpers import Session
from openerp.workflow.helpers import Record
from openerp.workflow.workitem import WorkflowItem
class WorkflowInstance(object):
def __init__(self, session, record, values):
assert isinstance(session, Session)
assert isinstance(record, Record)
self.session = session
self.record = record
if not values:
values = {}
assert isinstance(values, dict)
self.instance = values
@classmethod
def create(cls, session, record, workflow_id):
assert isinstance(session, Session)
assert isinstance(record, Record)
assert isinstance(workflow_id, (int, long))
cr = session.cr
cr.execute('insert into wkf_instance (res_type,res_id,uid,wkf_id,state) values (%s,%s,%s,%s,%s) RETURNING id', (record.model, record.id, session.uid, workflow_id, 'active'))
instance_id = cr.fetchone()[0]
cr.execute('select * from wkf_activity where flow_start=True and wkf_id=%s', (workflow_id,))
stack = []
activities = cr.dictfetchall()
for activity in activities:
WorkflowItem.create(session, record, activity, instance_id, stack)
cr.execute('SELECT * FROM wkf_instance WHERE id = %s', (instance_id,))
values = cr.dictfetchone()
wi = WorkflowInstance(session, record, values)
wi.update()
return wi
def delete(self):
self.session.cr.execute('delete from wkf_instance where res_id=%s and res_type=%s', (self.record.id, self.record.model))
def validate(self, signal, force_running=False):
assert isinstance(signal, basestring)
assert isinstance(force_running, bool)
cr = self.session.cr
cr.execute("select * from wkf_workitem where inst_id=%s", (self.instance['id'],))
stack = []
for work_item_values in cr.dictfetchall():
wi = WorkflowItem(self.session, self.record, work_item_values)
wi.process(signal=signal, force_running=force_running, stack=stack)
# An action is returned
self._update_end()
return stack and stack[0] or False
def update(self):
cr = self.session.cr
cr.execute("select * from wkf_workitem where inst_id=%s", (self.instance['id'],))
for work_item_values in cr.dictfetchall():
stack = []
WorkflowItem(self.session, self.record, work_item_values).process(stack=stack)
return self._update_end()
def _update_end(self):
cr = self.session.cr
instance_id = self.instance['id']
cr.execute('select wkf_id from wkf_instance where id=%s', (instance_id,))
wkf_id = cr.fetchone()[0]
cr.execute('select state,flow_stop from wkf_workitem w left join wkf_activity a on (a.id=w.act_id) where w.inst_id=%s', (instance_id,))
ok=True
for r in cr.fetchall():
if (r[0]<>'complete') or not r[1]:
ok=False
break
if ok:
cr.execute('select distinct a.name from wkf_activity a left join wkf_workitem w on (a.id=w.act_id) where w.inst_id=%s', (instance_id,))
act_names = cr.fetchall()
cr.execute("update wkf_instance set state='complete' where id=%s", (instance_id,))
cr.execute("update wkf_workitem set state='complete' where subflow_id=%s", (instance_id,))
cr.execute("select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)", (instance_id,))
for cur_instance_id, cur_model_name, cur_record_id in cr.fetchall():
cur_record = Record(cur_model_name, cur_record_id)
for act_name in act_names:
WorkflowInstance(self.session, cur_record, {'id':cur_instance_id}).validate('subflow.%s' % act_name[0])
return ok
def create(session, record, workflow_id):
return WorkflowInstance(session, record).create(workflow_id)
def delete(session, record):
return WorkflowInstance(session, record).delete()
def validate(session, record, instance_id, signal, force_running=False):
return WorkflowInstance(session, record).validate(instance_id, signal, force_running)
def update(session, record, instance_id):
return WorkflowInstance(session, record).update(instance_id)
def _update_end(session, record, instance_id):
return WorkflowInstance(session, record)._update_end(instance_id)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.5/django/contrib/gis/db/backends/mysql/operations.py | 100 | 2417 | from django.db.backends.mysql.base import DatabaseOperations
from django.contrib.gis.db.backends.adapter import WKTAdapter
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.utils import six
class MySQLOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.backends.mysql.compiler'
mysql = True
name = 'mysql'
select = 'AsText(%s)'
from_wkb = 'GeomFromWKB'
from_text = 'GeomFromText'
Adapter = WKTAdapter
Adaptor = Adapter # Backwards-compatibility alias.
geometry_functions = {
'bbcontains' : 'MBRContains', # For consistency w/PostGIS API
'bboverlaps' : 'MBROverlaps', # .. ..
'contained' : 'MBRWithin', # .. ..
'contains' : 'MBRContains',
'disjoint' : 'MBRDisjoint',
'equals' : 'MBREqual',
'exact' : 'MBREqual',
'intersects' : 'MBRIntersects',
'overlaps' : 'MBROverlaps',
'same_as' : 'MBREqual',
'touches' : 'MBRTouches',
'within' : 'MBRWithin',
}
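    # Illustrative mapping (the query is hypothetical): a filter such as
    # Model.objects.filter(geom__contains=pnt) is compiled via spatial_lookup_sql()
    # below into "MBRContains(`table`.`geom`, GeomFromText(%s))".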
gis_terms = dict([(term, None) for term in list(geometry_functions) + ['isnull']])
def geo_db_type(self, f):
return f.geom_type
def get_geom_placeholder(self, value, srid):
"""
The placeholder here has to include MySQL's WKT constructor. Because
MySQL does not support spatial transformations, there is no need to
modify the placeholder based on the contents of the given value.
"""
if hasattr(value, 'expression'):
placeholder = self.get_expression_column(value)
else:
placeholder = '%s(%%s)' % self.from_text
return placeholder
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
alias, col, db_type = lvalue
geo_col = '%s.%s' % (qn(alias), qn(col))
lookup_info = self.geometry_functions.get(lookup_type, False)
if lookup_info:
return "%s(%s, %s)" % (lookup_info, geo_col,
self.get_geom_placeholder(value, field.srid))
# TODO: Is this really necessary? MySQL can't handle NULL geometries
# in its spatial indexes anyways.
if lookup_type == 'isnull':
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
| apache-2.0 |
mkrupcale/ansible | lib/ansible/modules/cloud/amazon/ec2_asg_facts.py | 11 | 11706 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2_asg_facts
short_description: Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
description:
- Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
version_added: "2.2"
author: "Rob White (@wimnat)"
options:
name:
description:
- The prefix or name of the auto scaling group(s) you are searching for.
- "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
required: false
tags:
description:
- "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling group(s) you are searching for."
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Find all groups
- ec2_asg_facts:
register: asgs
# Find a group with matching name/prefix
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
# Find a group with matching tags
- ec2_asg_facts:
tags:
project: webapp
env: production
register: asgs
# Find a group with matching name/prefix and tags
- ec2_asg_facts:
name: myproject
tags:
env: production
register: asgs
# Fail if no groups are found
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
failed_when: "{{ asgs.results | length == 0 }}"
# Fail if more than 1 group is found
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
failed_when: "{{ asgs.results | length > 1 }}"
'''
RETURN = '''
---
auto_scaling_group_arn:
description: The Amazon Resource Name of the ASG
returned: success
type: string
sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
auto_scaling_group_name:
description: Name of autoscaling group
returned: success
type: str
sample: "public-webapp-production-1"
availability_zones:
description: List of Availability Zones that are enabled for this ASG.
returned: success
type: list
sample: ["us-west-2a", "us-west-2b", "us-west-2a"]
created_time:
description: The date and time this ASG was created, in ISO 8601 format.
returned: success
type: string
sample: "2015-11-25T00:05:36.309Z"
default_cooldown:
description: The default cooldown time in seconds.
returned: success
type: int
sample: 300
desired_capacity:
description: The number of EC2 instances that should be running in this group.
returned: success
type: int
sample: 3
health_check_period:
description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
returned: success
type: int
sample: 30
health_check_type:
description: The service you want the health status from, one of "EC2" or "ELB".
returned: success
type: str
sample: "ELB"
instances:
description: List of EC2 instances and their status as it relates to the ASG.
returned: success
type: list
sample: [
{
"availability_zone": "us-west-2a",
"health_status": "Healthy",
"instance_id": "i-es22ad25",
"launch_configuration_name": "public-webapp-production-1",
"lifecycle_state": "InService",
"protected_from_scale_in": "false"
}
]
launch_configuration_name:
description: Name of launch configuration associated with the ASG.
returned: success
type: str
sample: "public-webapp-production-1"
load_balancer_names:
description: List of load balancers names attached to the ASG.
returned: success
type: list
sample: ["elb-webapp-prod"]
max_size:
description: Maximum size of group
returned: success
type: int
sample: 3
min_size:
description: Minimum size of group
returned: success
type: int
sample: 1
new_instances_protected_from_scale_in:
description: Whether or not new instances a protected from automatic scaling in.
returned: success
type: boolean
sample: "false"
placement_group:
description: Placement group into which instances are launched, if any.
returned: success
type: str
sample: None
status:
description: The current state of the group when DeleteAutoScalingGroup is in progress.
returned: success
type: str
sample: None
tags:
description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
returned: success
type: list
sample: [
{
"key": "Name",
"value": "public-webapp-production-1",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
},
{
"key": "env",
"value": "production",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
}
]
termination_policies:
description: A list of termination policies for the group.
returned: success
type: str
sample: ["Default"]
'''
import re  # used by find_asgs() below
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def match_asg_tags(tags_to_match, asg):
for key, value in tags_to_match.iteritems():
for tag in asg['Tags']:
if key == tag['Key'] and value == tag['Value']:
break
else: return False
return True
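# Illustrative (the tags are hypothetical): match_asg_tags({'env': 'production'}, asg)
# is True only when every requested key/value pair appears in the ASG's tag list.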
def find_asgs(conn, module, name=None, tags=None):
"""
Args:
conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
name (str): Optional name of the ASG you are looking for.
tags (dict): Optional dictionary of tags and values to search for.
Basic Usage:
>>> name = 'public-webapp-production'
>>> tags = { 'env': 'production' }
>>> conn = boto3.client('autoscaling', region_name='us-west-2')
>>> results = find_asgs(name, conn)
Returns:
List
[
{
"auto_scaling_group_arn": "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:autoScalingGroupName/public-webapp-production",
"auto_scaling_group_name": "public-webapp-production",
"availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
"created_time": "2016-02-02T23:28:42.481000+00:00",
"default_cooldown": 300,
"desired_capacity": 2,
"enabled_metrics": [],
"health_check_grace_period": 300,
"health_check_type": "ELB",
"instances":
[
{
"availability_zone": "us-west-2c",
"health_status": "Healthy",
"instance_id": "i-047a12cb",
"launch_configuration_name": "public-webapp-production-1",
"lifecycle_state": "InService",
"protected_from_scale_in": false
},
{
"availability_zone": "us-west-2a",
"health_status": "Healthy",
"instance_id": "i-7a29df2c",
"launch_configuration_name": "public-webapp-production-1",
"lifecycle_state": "InService",
"protected_from_scale_in": false
}
],
"launch_configuration_name": "public-webapp-production-1",
"load_balancer_names": ["public-webapp-production-lb"],
"max_size": 4,
"min_size": 2,
"new_instances_protected_from_scale_in": false,
"placement_group": None,
"status": None,
"suspended_processes": [],
"tags":
[
{
"key": "Name",
"propagate_at_launch": true,
"resource_id": "public-webapp-production",
"resource_type": "auto-scaling-group",
"value": "public-webapp-production"
},
{
"key": "env",
"propagate_at_launch": true,
"resource_id": "public-webapp-production",
"resource_type": "auto-scaling-group",
"value": "production"
}
],
"termination_policies":
[
"Default"
],
"vpc_zone_identifier":
[
"subnet-a1b1c1d1",
"subnet-a2b2c2d2",
"subnet-a3b3c3d3"
]
}
]
"""
try:
asgs = conn.describe_auto_scaling_groups()
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
matched_asgs = []
if name is not None:
        # the user supplied a name (or prefix); compile it as an anchored regex
name_prog = re.compile(r'^' + name)
for asg in asgs['AutoScalingGroups']:
if name:
matched_name = name_prog.search(asg['AutoScalingGroupName'])
else:
matched_name = True
if tags:
matched_tags = match_asg_tags(tags, asg)
else:
matched_tags = True
if matched_name and matched_tags:
matched_asgs.append(camel_dict_to_snake_dict(asg))
return matched_asgs
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str'),
tags=dict(type='dict'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
asg_name = module.params.get('name')
asg_tags = module.params.get('tags')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
autoscaling = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags)
module.exit_json(results=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
stevehof/CouchPotatoServer | couchpotato/core/media/_base/matcher/base.py | 81 | 2320 | from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
class MatcherBase(Plugin):
type = None
def __init__(self):
if self.type:
addEvent('%s.matcher.correct' % self.type, self.correct)
def correct(self, chain, release, media, quality):
raise NotImplementedError()
def flattenInfo(self, info):
# Flatten dictionary of matches (chain info)
if isinstance(info, dict):
return dict([(key, self.flattenInfo(value)) for key, value in info.items()])
# Flatten matches
result = None
for match in info:
if isinstance(match, dict):
if result is None:
result = {}
for key, value in match.items():
if key not in result:
result[key] = []
result[key].append(value)
else:
if result is None:
result = []
result.append(match)
return result
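    # Illustrative with made-up data: flattenInfo([{'year': 2010}, {'year': 2011}])
    # returns {'year': [2010, 2011]}, merging the per-match dictionaries into lists.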
def constructFromRaw(self, match):
if not match:
return None
parts = [
''.join([
y for y in x[1:] if y
]) for x in match
]
return ''.join(parts)[:-1].strip()
def simplifyValue(self, value):
if not value:
return value
if isinstance(value, basestring):
return simplifyString(value)
if isinstance(value, list):
return [self.simplifyValue(x) for x in value]
raise ValueError("Unsupported value type")
def chainMatch(self, chain, group, tags):
info = self.flattenInfo(chain.info[group])
found_tags = []
for tag, accepted in tags.items():
values = [self.simplifyValue(x) for x in info.get(tag, [None])]
if any([val in accepted for val in values]):
found_tags.append(tag)
log.debug('tags found: %s, required: %s' % (found_tags, tags.keys()))
if set(tags.keys()) == set(found_tags):
return True
return all([key in found_tags for key, value in tags.items()])
| gpl-3.0 |
sisirkoppaka/articur8 | articurate/nertagger/celery_tasks.py | 1 | 4928 | from __future__ import absolute_import
# Needed because nertag.py is imported below but isn't available in the workers' import context otherwise
import re
import os
from nltk.tag.stanford import NERTagger
import time
import pickle
from itertools import izip
from collections import defaultdict
from celery import chord, group
from articurate.celery import celery
from articurate.nertagger import nertag
import articurate.utils.loader as article_loader
import articurate
from articurate.metrics import metrics
from celery import current_task
from celery.utils.log import get_task_logger
from articurate.utils.config import *
from articurate.pymotherlode import api
import simplejson as json
logger = get_task_logger(__name__)
@celery.task
def run_nertag():
# get latest dump of articles
#articles = article_loader.get_latest_dump()
articles = article_loader.get_all_dumps()
print "Got so many articles: ", len(articles)
ner_types = ['ORGANIZATION', 'LOCATION', 'PERSON']
try:
print "run_nertag: starting ", ner_types
if config['nertag.content']:
all_content = [article.content for count, article in enumerate(articles)]
else:
all_content = [article.title for count, article in enumerate(articles)]
result = chord(parse_NER_celery.s(article, count, ner_types) for count, article in enumerate(all_content))(save_celery.s(kwargs={'ner_types': ner_types}))
print "run_nertag: done! ", ner_types
return 'True'
except:
return 'False'
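# The chord above fans out one parse_NER_celery task per article and, once they
# have all finished, passes the list of per-article results to save_celery.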
@celery.task
@metrics.track
def save_celery(results, **kwargs):
print "save_celery: starting "
ner_types = ['ORGANIZATION', 'LOCATION', 'PERSON']
# get what is currently present in redis db
try:
final_dict = json.loads(api.getMetric("articurate.nertagger.celery_tasks.save_celery"))
except:
final_dict = None
pass
if final_dict == None:
final_dict = {}
for item in ner_types:
value = final_dict[item] if item in final_dict else []
for dictionary in results:
value.extend(dictionary[item])
value = list(set(value))
final_dict[item] = value
# # save result to file
# final_dict = {}
# for item in ner_types:
# value = []
# for dictionary in results:
# value.extend(dictionary[item])
# value = list(set(value))
# final_dict[item] = value
ner_file = open("nertagger.log",'w')
ner_file.write(json.dumps(final_dict, indent=" "))
ner_file.close()
print "save_celery: done! "
return json.dumps(final_dict, indent=" ")
@celery.task
def parse_NER_celery(document, articleCount, ner_types):
print "Starting document no. %d"%articleCount
result = {} # stores list of tokens associated with each ner_type
for item in ner_types:
result[item] = []
try:
# split text into sentences
sentenceEnders = re.compile('[.!?]')
sentences = sentenceEnders.split(document)
total = len(sentences)
#initialize paths
englishPath = os.path.join(os.path.join(os.path.dirname(articurate.__file__),'nertagger'),'english.all.3class.distsim.crf.ser.gz')
stanfordNERPath = os.path.join(os.path.join(os.path.dirname(articurate.__file__),'nertagger'),'stanford-ner.jar')
# initialize tagger
st = NERTagger(englishPath, stanfordNERPath)
# tag each sentence
for count, sentence in enumerate(sentences):
print "%d:%d/%d"%(articleCount, count, len(sentences))
tags = st.tag(sentence.encode('utf-8').split())
if len(tags) < 2:
continue
previous_tag = tags[0][1]
string = tags[0][0].lower()
index = 1
while index < len(tags):
current_tag = tags[index][1]
if current_tag == previous_tag:
string = string + " " + tags[index][0].lower()
else:
if previous_tag in ner_types:
value = result[previous_tag]
value.append(string.lower())
result[previous_tag] = value
string = tags[index][0].lower()
previous_tag = current_tag
index = index + 1
if previous_tag in ner_types:
value = result[previous_tag]
value.append(string.lower())
result[previous_tag] = value
# convert to set
for item in ner_types:
value = result[item]
value = list(set(value))
result[item] = value
except:
pass
print "Article number done:", articleCount, "\n"
return result
| mit |
jkugler/ansible | contrib/inventory/abiquo.py | 39 | 8967 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
External inventory script for Abiquo
====================================
Shamelessly copied from an existing inventory script.
This script generates an inventory that Ansible can understand by making API requests to the Abiquo API
Requires some Python libraries; ensure they are installed when using this script.
This script has been tested with Abiquo 3.0 but it may also work with Abiquo 2.6.
Before using this script you may want to modify the abiquo.ini config file.
This script generates an Ansible hosts file with these host groups:
ABQ_xxx: Defines a host by its Abiquo VM name label
all: Contains all hosts defined in the Abiquo user's enterprise
virtualdatacenter: Creates a host group for each virtualdatacenter containing all hosts defined in it
virtualappliance: Creates a host group for each virtualappliance containing all hosts defined in it
imagetemplate: Creates a host group for each image template containing all hosts using it
'''
# (c) 2014, Daniel Beneyto <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import time
import ConfigParser
import urllib2
import base64
try:
import json
except ImportError:
import simplejson as json
def api_get(link, config):
try:
if link == None:
request = urllib2.Request(config.get('api','uri')+config.get('api','login_path'))
request.add_header("Accept",config.get('api','login_type'))
else:
request = urllib2.Request(link['href']+'?limit=0')
request.add_header("Accept",link['type'])
# Auth
base64string = base64.encodestring('%s:%s' % (config.get('auth','apiuser'),config.get('auth','apipass'))).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
result = urllib2.urlopen(request)
return json.loads(result.read())
except:
return None
def save_cache(data, config):
''' saves item to cache '''
dpath = config.get('cache','cache_dir')
try:
cache = open('/'.join([dpath,'inventory']), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError as e:
pass # not really sure what to do here
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('cache','cache_dir')
inv = {}
try:
cache = open('/'.join([dpath,'inventory']), 'r')
inv = cache.read()
cache.close()
except IOError as e:
pass # not really sure what to do here
return inv
def cache_available(config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('cache','cache_dir'):
dpath = config.get('cache','cache_dir')
try:
existing = os.stat( '/'.join([dpath,'inventory']))
except:
# cache doesn't exist or isn't accessible
return False
if config.has_option('cache', 'cache_max_age'):
maxage = config.get('cache', 'cache_max_age')
if ((int(time.time()) - int(existing.st_mtime)) <= int(maxage)):
return True
return False
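# Illustrative abiquo.ini snippet for the cache helpers above (paths and values
# are assumptions, not taken from this script):
#   [cache]
#   cache_dir = /tmp/ansible-abiquo
#   cache_max_age = 300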
def generate_inv_from_api(enterprise_entity,config):
try:
inventory['all'] = {}
inventory['all']['children'] = []
inventory['all']['hosts'] = []
inventory['_meta'] = {}
inventory['_meta']['hostvars'] = {}
enterprise = api_get(enterprise_entity,config)
vms_entity = next(link for link in (enterprise['links']) if (link['rel']=='virtualmachines'))
vms = api_get(vms_entity,config)
for vmcollection in vms['collection']:
vm_vapp = next(link for link in (vmcollection['links']) if (link['rel']=='virtualappliance'))['title'].replace('[','').replace(']','').replace(' ','_')
vm_vdc = next(link for link in (vmcollection['links']) if (link['rel']=='virtualdatacenter'))['title'].replace('[','').replace(']','').replace(' ','_')
vm_template = next(link for link in (vmcollection['links']) if (link['rel']=='virtualmachinetemplate'))['title'].replace('[','').replace(']','').replace(' ','_')
# From abiquo.ini: Only adding to inventory VMs with public IP
if (config.getboolean('defaults', 'public_ip_only')) == True:
for link in vmcollection['links']:
if (link['type']=='application/vnd.abiquo.publicip+json' and link['rel']=='ip'):
vm_nic = link['title']
break
else:
vm_nic = None
# Otherwise, assigning defined network interface IP address
else:
for link in vmcollection['links']:
if (link['rel']==config.get('defaults', 'default_net_interface')):
vm_nic = link['title']
break
else:
vm_nic = None
vm_state = True
# From abiquo.ini: Only adding to inventory VMs deployed
if ((config.getboolean('defaults', 'deployed_only') == True) and (vmcollection['state'] == 'NOT_ALLOCATED')):
vm_state = False
if not vm_nic == None and vm_state:
if not vm_vapp in inventory.keys():
inventory[vm_vapp] = {}
inventory[vm_vapp]['children'] = []
inventory[vm_vapp]['hosts'] = []
if not vm_vdc in inventory.keys():
inventory[vm_vdc] = {}
inventory[vm_vdc]['hosts'] = []
inventory[vm_vdc]['children'] = []
if not vm_template in inventory.keys():
inventory[vm_template] = {}
inventory[vm_template]['children'] = []
inventory[vm_template]['hosts'] = []
if config.getboolean('defaults', 'get_metadata') == True:
meta_entity = next(link for link in (vmcollection['links']) if (link['rel']=='metadata'))
try:
metadata = api_get(meta_entity,config)
if (config.getfloat("api","version") >= 3.0):
vm_metadata = metadata['metadata']
else:
vm_metadata = metadata['metadata']['metadata']
inventory['_meta']['hostvars'][vm_nic] = vm_metadata
except Exception as e:
pass
inventory[vm_vapp]['children'].append(vmcollection['name'])
inventory[vm_vdc]['children'].append(vmcollection['name'])
inventory[vm_template]['children'].append(vmcollection['name'])
inventory['all']['children'].append(vmcollection['name'])
inventory[vmcollection['name']] = []
inventory[vmcollection['name']].append(vm_nic)
return inventory
except Exception as e:
# Return empty hosts output
return { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } }
def get_inventory(enterprise, config):
''' Reads the inventory from cache or Abiquo api '''
if cache_available(config):
inv = get_cache('inventory', config)
else:
default_group = os.path.basename(sys.argv[0]).rstrip('.py')
# MAKE ABIQUO API CALLS #
inv = generate_inv_from_api(enterprise,config)
save_cache(inv, config)
return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
enterprise = {}
# Read config
config = ConfigParser.SafeConfigParser()
for configfilename in [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'abiquo.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
try:
login = api_get(None,config)
enterprise = next(link for link in (login['links']) if (link['rel']=='enterprise'))
except Exception as e:
enterprise = None
if cache_available(config):
inventory = get_cache('inventory', config)
else:
inventory = get_inventory(enterprise, config)
# return to ansible
sys.stdout.write(str(inventory))
sys.stdout.flush()
| gpl-3.0 |
Foxfanmedium/python_training | OnlineCoursera/mail_ru/Python_1/env/Lib/site-packages/IPython/core/magics/script.py | 1 | 8823 | """Magic functions for running cells in various scripts."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import errno
import os
import sys
import signal
import time
from subprocess import Popen, PIPE
import atexit
from IPython.core import magic_arguments
from IPython.core.magic import (
Magics, magics_class, line_magic, cell_magic
)
from IPython.lib.backgroundjobs import BackgroundJobManager
from IPython.utils import py3compat
from IPython.utils.process import arg_split
from traitlets import List, Dict, default
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
def script_args(f):
"""single decorator for adding script args"""
args = [
magic_arguments.argument(
'--out', type=str,
help="""The variable in which to store stdout from the script.
If the script is backgrounded, this will be the stdout *pipe*,
            instead of the stdout text itself.
"""
),
magic_arguments.argument(
'--err', type=str,
help="""The variable in which to store stderr from the script.
If the script is backgrounded, this will be the stderr *pipe*,
instead of the stderr text itself.
"""
),
magic_arguments.argument(
'--bg', action="store_true",
help="""Whether to run the script in the background.
If given, the only way to see the output of the command is
with --out/err.
"""
),
magic_arguments.argument(
'--proc', type=str,
help="""The variable in which to store Popen instance.
This is used only when --bg option is given.
"""
),
]
for arg in args:
f = arg(f)
return f
@magics_class
class ScriptMagics(Magics):
"""Magics for talking to scripts
This defines a base `%%script` cell magic for running a cell
with a program in a subprocess, and registers a few top-level
magics that call %%script with common interpreters.
"""
script_magics = List(
help="""Extra script cell magics to define
This generates simple wrappers of `%%script foo` as `%%foo`.
If you want to add script magics that aren't on your path,
specify them in script_paths
""",
).tag(config=True)
@default('script_magics')
def _script_magics_default(self):
"""default to a common list of programs"""
defaults = [
'sh',
'bash',
'perl',
'ruby',
'python',
'python2',
'python3',
'pypy',
]
if os.name == 'nt':
defaults.extend([
'cmd',
])
return defaults
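    # Illustrative config override (an assumption, not part of this file):
    #   c.ScriptMagics.script_magics = ['R']
    #   c.ScriptMagics.script_paths = {'R': '/usr/local/bin/Rscript'}
    # would register a %%R cell magic that pipes the cell body to Rscript.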
script_paths = Dict(
help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
Only necessary for items in script_magics where the default path will not
find the right interpreter.
"""
).tag(config=True)
def __init__(self, shell=None):
super(ScriptMagics, self).__init__(shell=shell)
self._generate_script_magics()
self.job_manager = BackgroundJobManager()
self.bg_processes = []
atexit.register(self.kill_bg_processes)
def __del__(self):
self.kill_bg_processes()
def _generate_script_magics(self):
cell_magics = self.magics['cell']
for name in self.script_magics:
cell_magics[name] = self._make_script_magic(name)
def _make_script_magic(self, name):
"""make a named magic, that calls %%script with a particular program"""
# expand to explicit path if necessary:
script = self.script_paths.get(name, name)
@magic_arguments.magic_arguments()
@script_args
def named_script_magic(line, cell):
# if line, add it as cl-flags
if line:
line = "%s %s" % (script, line)
else:
line = script
return self.shebang(line, cell)
# write a basic docstring:
named_script_magic.__doc__ = \
"""%%{name} script magic
Run cells with {script} in a subprocess.
This is a shortcut for `%%script {script}`
""".format(**locals())
return named_script_magic
@magic_arguments.magic_arguments()
@script_args
@cell_magic("script")
def shebang(self, line, cell):
"""Run a cell via a shell command
The `%%script` line is like the #! line of script,
specifying a program (bash, perl, ruby, etc.) with which to run.
The rest of the cell is run by that program.
Examples
--------
::
In [1]: %%script bash
...: for i in 1 2 3; do
...: echo $i
...: done
1
2
3
"""
argv = arg_split(line, posix = not sys.platform.startswith('win'))
args, cmd = self.shebang.parser.parse_known_args(argv)
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
print("Couldn't find program: %r" % cmd[0])
return
else:
raise
if not cell.endswith('\n'):
cell += '\n'
cell = cell.encode('utf8', 'replace')
if args.bg:
self.bg_processes.append(p)
self._gc_bg_processes()
if args.out:
self.shell.user_ns[args.out] = p.stdout
if args.err:
self.shell.user_ns[args.err] = p.stderr
self.job_manager.new(self._run_script, p, cell, daemon=True)
if args.proc:
self.shell.user_ns[args.proc] = p
return
try:
out, err = p.communicate(cell)
except KeyboardInterrupt:
try:
p.send_signal(signal.SIGINT)
time.sleep(0.1)
if p.poll() is not None:
print("Process is interrupted.")
return
p.terminate()
time.sleep(0.1)
if p.poll() is not None:
print("Process is terminated.")
return
p.kill()
print("Process is killed.")
except OSError:
pass
except Exception as e:
print("Error while terminating subprocess (pid=%i): %s" \
% (p.pid, e))
return
out = py3compat.decode(out)
err = py3compat.decode(err)
if args.out:
self.shell.user_ns[args.out] = out
else:
sys.stdout.write(out)
sys.stdout.flush()
if args.err:
self.shell.user_ns[args.err] = err
else:
sys.stderr.write(err)
sys.stderr.flush()
def _run_script(self, p, cell):
"""callback for running the script in the background"""
p.stdin.write(cell)
p.stdin.close()
p.wait()
@line_magic("killbgscripts")
def killbgscripts(self, _nouse_=''):
"""Kill all BG processes started by %%script and its family."""
self.kill_bg_processes()
print("All background processes were killed.")
def kill_bg_processes(self):
"""Kill all BG processes which are still running."""
if not self.bg_processes:
return
for p in self.bg_processes:
if p.poll() is None:
try:
p.send_signal(signal.SIGINT)
except:
pass
time.sleep(0.1)
self._gc_bg_processes()
if not self.bg_processes:
return
for p in self.bg_processes:
if p.poll() is None:
try:
p.terminate()
except:
pass
time.sleep(0.1)
self._gc_bg_processes()
if not self.bg_processes:
return
for p in self.bg_processes:
if p.poll() is None:
try:
p.kill()
except:
pass
self._gc_bg_processes()
def _gc_bg_processes(self):
self.bg_processes = [p for p in self.bg_processes if p.poll() is None]
| apache-2.0 |
vipmunot/Data-Analysis-using-Python | Apis and Scraping/Intermediate APIs-118.py | 1 | 2765 | ## 2. API Authentication ##
# Create a dictionary of headers containing our Authorization header.
headers = {"Authorization": "token 1f36137fbbe1602f779300dad26e4c1b7fbab631"}
# Make a GET request to the GitHub API with our headers.
# This API endpoint will give us details about Vik Paruchuri.
response = requests.get("https://api.github.com/users/VikParuchuri", headers=headers)
# Print the content of the response. As you can see, this token corresponds to the account of Vik Paruchuri.
print(response.json())
response = requests.get("https://api.github.com/users/VikParuchuri/orgs", headers=headers)
orgs = response.json()
## 3. Endpoints and Objects ##
# We've loaded headers in.
response = requests.get("https://api.github.com/users/torvalds", headers=headers)
torvalds = response.json()
## 4. Other Objects ##
# Enter your answer here.
response = requests.get("https://api.github.com/repos/octocat/Hello-World", headers=headers)
hello_world = response.json()
## 5. Pagination ##
params = {"per_page": 50, "page": 1}
response = requests.get("https://api.github.com/users/VikParuchuri/starred", headers=headers, params=params)
page1_repos = response.json()
response = requests.get("https://api.github.com/users/VikParuchuri/starred", headers=headers, params={"per_page": 50, "page": 2})
page2_repos = response.json()
## 6. User-Level Endpoints ##
# Enter your code here.
response = requests.get("https://api.github.com/user", headers=headers)
user = response.json()
## 7. POST Requests ##
# Create the data we'll pass into the API endpoint. While this endpoint only requires the "name" key, there are other optional keys.
payload = {"name": "test"}
# We need to pass in our authentication headers!
response = requests.post("https://api.github.com/user/repos", json=payload, headers=headers)
print(response.status_code)
payload = {"name": "learning-about-apis"}
response = requests.post("https://api.github.com/user/repos", json=payload, headers=headers)
status = response.status_code
## 8. PUT/PATCH Requests ##
payload = {"description": "The best repository ever!", "name": "test"}
response = requests.patch("https://api.github.com/repos/VikParuchuri/test", json=payload, headers=headers)
print(response.status_code)
payload = {"description": "Learning about requests!", "name": "learning-about-apis"}
response = requests.patch("https://api.github.com/repos/VikParuchuri/learning-about-apis", json=payload, headers=headers)
status = response.status_code
## 9. DELETE Requests ##
response = requests.delete("https://api.github.com/repos/VikParuchuri/test", headers=headers)
print(response.status_code)
response = requests.delete("https://api.github.com/repos/VikParuchuri/learning-about-apis", headers=headers)
status = response.status_code | mit |
carolineLe/miasm | test/utils/test.py | 7 | 1552 | class Test(object):
"Stand for a test to run"
def __init__(self, command_line, base_dir="", depends=None,
products=None, tags=None, executable=None):
"""Create a Test instance.
@command_line: list of string standing for arguments to launch
@base_dir: base directory for launch
@depends: list of Test instance indicating dependencies
@products: elements produced to remove after tests
@tags: list of str indicating current test categories
@executable: if set, use this binary instead of Python
"""
self.command_line = command_line
self.base_dir = base_dir
self.depends = depends if depends else []
self.products = products if products else []
self.tags = tags if tags else []
self.executable = executable
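    # Illustrative (values are hypothetical): Test(["example.py", "-v"], base_dir="arch",
    # tags=["long"]) describes running "python example.py -v" from the "arch"
    # sub-directory, tagged "long".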
def __repr__(self):
displayed = ["command_line", "base_dir", "depends", "products", "tags"]
displayed.append("python" if not self.executable else self.executable)
return "<Test " + \
" ".join("%s=%s" % (n, getattr(self,n)) for n in displayed ) + ">"
def __eq__(self, test):
if not isinstance(test, Test):
return False
return all([self.command_line == test.command_line,
self.base_dir == test.base_dir,
self.depends == test.depends,
self.products == test.products,
self.tags == test.tags,
self.executable == test.executable,
])
| gpl-2.0 |
kangbiao/tornado | tornado/test/iostream_test.py | 14 | 41539 | from __future__ import absolute_import, division, print_function, with_statement
from tornado.concurrent import Future
from tornado import gen
from tornado import netutil
from tornado.iostream import IOStream, SSLIOStream, PipeIOStream, StreamClosedError
from tornado.httputil import HTTPHeaders
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket
from tornado.stack_context import NullContext
from tornado.tcpserver import TCPServer
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog, gen_test
from tornado.test.util import unittest, skipIfNonUnix, refusing_port
from tornado.web import RequestHandler, Application
import errno
import logging
import os
import platform
import socket
import ssl
import sys
try:
from unittest import mock # python 3.3
except ImportError:
try:
import mock # third-party mock package
except ImportError:
mock = None
def _server_ssl_options():
return dict(
certfile=os.path.join(os.path.dirname(__file__), 'test.crt'),
keyfile=os.path.join(os.path.dirname(__file__), 'test.key'),
)
class HelloHandler(RequestHandler):
def get(self):
self.write("Hello")
class TestIOStreamWebMixin(object):
def _make_client_iostream(self):
raise NotImplementedError()
def get_app(self):
return Application([('/', HelloHandler)])
def test_connection_closed(self):
# When a server sends a response and then closes the connection,
# the client must be allowed to read the data before the IOStream
# closes itself. Epoll reports closed connections with a separate
# EPOLLRDHUP event delivered at the same time as the read event,
# while kqueue reports them as a second read/write event with an EOF
# flag.
response = self.fetch("/", headers={"Connection": "close"})
response.rethrow()
def test_read_until_close(self):
stream = self._make_client_iostream()
stream.connect(('127.0.0.1', self.get_http_port()), callback=self.stop)
self.wait()
stream.write(b"GET / HTTP/1.0\r\n\r\n")
stream.read_until_close(self.stop)
data = self.wait()
self.assertTrue(data.startswith(b"HTTP/1.1 200"))
self.assertTrue(data.endswith(b"Hello"))
def test_read_zero_bytes(self):
self.stream = self._make_client_iostream()
self.stream.connect(("127.0.0.1", self.get_http_port()),
callback=self.stop)
self.wait()
self.stream.write(b"GET / HTTP/1.0\r\n\r\n")
# normal read
self.stream.read_bytes(9, self.stop)
data = self.wait()
self.assertEqual(data, b"HTTP/1.1 ")
# zero bytes
self.stream.read_bytes(0, self.stop)
data = self.wait()
self.assertEqual(data, b"")
# another normal read
self.stream.read_bytes(3, self.stop)
data = self.wait()
self.assertEqual(data, b"200")
self.stream.close()
def test_write_while_connecting(self):
stream = self._make_client_iostream()
connected = [False]
def connected_callback():
connected[0] = True
self.stop()
stream.connect(("127.0.0.1", self.get_http_port()),
callback=connected_callback)
# unlike the previous tests, try to write before the connection
# is complete.
written = [False]
def write_callback():
written[0] = True
self.stop()
stream.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n",
callback=write_callback)
self.assertTrue(not connected[0])
# by the time the write has flushed, the connection callback has
# also run
try:
self.wait(lambda: connected[0] and written[0])
finally:
logging.debug((connected, written))
stream.read_until_close(self.stop)
data = self.wait()
self.assertTrue(data.endswith(b"Hello"))
stream.close()
@gen_test
def test_future_interface(self):
"""Basic test of IOStream's ability to return Futures."""
stream = self._make_client_iostream()
connect_result = yield stream.connect(
("127.0.0.1", self.get_http_port()))
self.assertIs(connect_result, stream)
yield stream.write(b"GET / HTTP/1.0\r\n\r\n")
first_line = yield stream.read_until(b"\r\n")
self.assertEqual(first_line, b"HTTP/1.1 200 OK\r\n")
# callback=None is equivalent to no callback.
header_data = yield stream.read_until(b"\r\n\r\n", callback=None)
headers = HTTPHeaders.parse(header_data.decode('latin1'))
content_length = int(headers['Content-Length'])
body = yield stream.read_bytes(content_length)
self.assertEqual(body, b'Hello')
stream.close()
@gen_test
def test_future_close_while_reading(self):
stream = self._make_client_iostream()
yield stream.connect(("127.0.0.1", self.get_http_port()))
yield stream.write(b"GET / HTTP/1.0\r\n\r\n")
with self.assertRaises(StreamClosedError):
yield stream.read_bytes(1024 * 1024)
stream.close()
@gen_test
def test_future_read_until_close(self):
# Ensure that the data comes through before the StreamClosedError.
stream = self._make_client_iostream()
yield stream.connect(("127.0.0.1", self.get_http_port()))
yield stream.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n")
yield stream.read_until(b"\r\n\r\n")
body = yield stream.read_until_close()
self.assertEqual(body, b"Hello")
# Nothing else to read; the error comes immediately without waiting
# for yield.
with self.assertRaises(StreamClosedError):
stream.read_bytes(1)
class TestIOStreamMixin(object):
def _make_server_iostream(self, connection, **kwargs):
raise NotImplementedError()
def _make_client_iostream(self, connection, **kwargs):
raise NotImplementedError()
def make_iostream_pair(self, **kwargs):
listener, port = bind_unused_port()
streams = [None, None]
def accept_callback(connection, address):
streams[0] = self._make_server_iostream(connection, **kwargs)
self.stop()
def connect_callback():
streams[1] = client_stream
self.stop()
netutil.add_accept_handler(listener, accept_callback,
io_loop=self.io_loop)
client_stream = self._make_client_iostream(socket.socket(), **kwargs)
client_stream.connect(('127.0.0.1', port),
callback=connect_callback)
self.wait(condition=lambda: all(streams))
self.io_loop.remove_handler(listener.fileno())
listener.close()
return streams
def test_streaming_callback_with_data_in_buffer(self):
server, client = self.make_iostream_pair()
client.write(b"abcd\r\nefgh")
server.read_until(b"\r\n", self.stop)
data = self.wait()
self.assertEqual(data, b"abcd\r\n")
def closed_callback(chunk):
self.fail()
server.read_until_close(callback=closed_callback,
streaming_callback=self.stop)
# self.io_loop.add_timeout(self.io_loop.time() + 0.01, self.stop)
data = self.wait()
self.assertEqual(data, b"efgh")
server.close()
client.close()
def test_write_zero_bytes(self):
# Attempting to write zero bytes should run the callback without
# going into an infinite loop.
server, client = self.make_iostream_pair()
server.write(b'', callback=self.stop)
self.wait()
server.close()
client.close()
def test_connection_refused(self):
# When a connection is refused, the connect callback should not
# be run. (The kqueue IOLoop used to behave differently from the
# epoll IOLoop in this respect)
cleanup_func, port = refusing_port()
self.addCleanup(cleanup_func)
stream = IOStream(socket.socket(), self.io_loop)
self.connect_called = False
def connect_callback():
self.connect_called = True
self.stop()
stream.set_close_callback(self.stop)
# log messages vary by platform and ioloop implementation
with ExpectLog(gen_log, ".*", required=False):
stream.connect(("127.0.0.1", port), connect_callback)
self.wait()
self.assertFalse(self.connect_called)
self.assertTrue(isinstance(stream.error, socket.error), stream.error)
if sys.platform != 'cygwin':
_ERRNO_CONNREFUSED = (errno.ECONNREFUSED,)
if hasattr(errno, "WSAECONNREFUSED"):
_ERRNO_CONNREFUSED += (errno.WSAECONNREFUSED,)
# cygwin's errnos don't match those used on native windows python
self.assertTrue(stream.error.args[0] in _ERRNO_CONNREFUSED)
@unittest.skipIf(mock is None, 'mock package not present')
def test_gaierror(self):
# Test that IOStream sets its exc_info on getaddrinfo error.
# It's difficult to reliably trigger a getaddrinfo error;
        # some resolvers won't even return errors for malformed names,
# so we mock it instead. If IOStream changes to call a Resolver
# before sock.connect, the mock target will need to change too.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = IOStream(s, io_loop=self.io_loop)
stream.set_close_callback(self.stop)
with mock.patch('socket.socket.connect',
side_effect=socket.gaierror('boom')):
with ExpectLog(gen_log, "Connect error"):
stream.connect(('localhost', 80), callback=self.stop)
self.wait()
self.assertIsInstance(stream.error, socket.gaierror)
def test_read_callback_error(self):
# Test that IOStream sets its exc_info when a read callback throws
server, client = self.make_iostream_pair()
try:
server.set_close_callback(self.stop)
with ExpectLog(
app_log, "(Uncaught exception|Exception in callback)"
):
# Clear ExceptionStackContext so IOStream catches error
with NullContext():
server.read_bytes(1, callback=lambda data: 1 / 0)
client.write(b"1")
self.wait()
self.assertTrue(isinstance(server.error, ZeroDivisionError))
finally:
server.close()
client.close()
def test_streaming_callback(self):
server, client = self.make_iostream_pair()
try:
chunks = []
final_called = []
def streaming_callback(data):
chunks.append(data)
self.stop()
def final_callback(data):
self.assertFalse(data)
final_called.append(True)
self.stop()
server.read_bytes(6, callback=final_callback,
streaming_callback=streaming_callback)
client.write(b"1234")
self.wait(condition=lambda: chunks)
client.write(b"5678")
self.wait(condition=lambda: final_called)
self.assertEqual(chunks, [b"1234", b"56"])
# the rest of the last chunk is still in the buffer
server.read_bytes(2, callback=self.stop)
data = self.wait()
self.assertEqual(data, b"78")
finally:
server.close()
client.close()
def test_streaming_until_close(self):
server, client = self.make_iostream_pair()
try:
chunks = []
closed = [False]
def streaming_callback(data):
chunks.append(data)
self.stop()
def close_callback(data):
assert not data, data
closed[0] = True
self.stop()
client.read_until_close(callback=close_callback,
streaming_callback=streaming_callback)
server.write(b"1234")
self.wait(condition=lambda: len(chunks) == 1)
server.write(b"5678", self.stop)
self.wait()
server.close()
self.wait(condition=lambda: closed[0])
self.assertEqual(chunks, [b"1234", b"5678"])
finally:
server.close()
client.close()
def test_streaming_until_close_future(self):
server, client = self.make_iostream_pair()
try:
chunks = []
@gen.coroutine
def client_task():
yield client.read_until_close(streaming_callback=chunks.append)
@gen.coroutine
def server_task():
yield server.write(b"1234")
yield gen.sleep(0.01)
yield server.write(b"5678")
server.close()
@gen.coroutine
def f():
yield [client_task(), server_task()]
self.io_loop.run_sync(f)
self.assertEqual(chunks, [b"1234", b"5678"])
finally:
server.close()
client.close()
def test_delayed_close_callback(self):
# The scenario: Server closes the connection while there is a pending
# read that can be served out of buffered data. The client does not
# run the close_callback as soon as it detects the close, but rather
# defers it until after the buffered read has finished.
server, client = self.make_iostream_pair()
try:
client.set_close_callback(self.stop)
server.write(b"12")
chunks = []
def callback1(data):
chunks.append(data)
client.read_bytes(1, callback2)
server.close()
def callback2(data):
chunks.append(data)
client.read_bytes(1, callback1)
self.wait() # stopped by close_callback
self.assertEqual(chunks, [b"1", b"2"])
finally:
server.close()
client.close()
def test_future_delayed_close_callback(self):
# Same as test_delayed_close_callback, but with the future interface.
server, client = self.make_iostream_pair()
# We can't call make_iostream_pair inside a gen_test function
# because the ioloop is not reentrant.
@gen_test
def f(self):
server.write(b"12")
chunks = []
chunks.append((yield client.read_bytes(1)))
server.close()
chunks.append((yield client.read_bytes(1)))
self.assertEqual(chunks, [b"1", b"2"])
try:
f(self)
finally:
server.close()
client.close()
def test_close_buffered_data(self):
# Similar to the previous test, but with data stored in the OS's
# socket buffers instead of the IOStream's read buffer. Out-of-band
# close notifications must be delayed until all data has been
# drained into the IOStream buffer. (epoll used to use out-of-band
# close events with EPOLLRDHUP, but no longer)
#
# This depends on the read_chunk_size being smaller than the
# OS socket buffer, so make it small.
server, client = self.make_iostream_pair(read_chunk_size=256)
try:
server.write(b"A" * 512)
client.read_bytes(256, self.stop)
data = self.wait()
self.assertEqual(b"A" * 256, data)
server.close()
# Allow the close to propagate to the client side of the
# connection. Using add_callback instead of add_timeout
# doesn't seem to work, even with multiple iterations
self.io_loop.add_timeout(self.io_loop.time() + 0.01, self.stop)
self.wait()
client.read_bytes(256, self.stop)
data = self.wait()
self.assertEqual(b"A" * 256, data)
finally:
server.close()
client.close()
def test_read_until_close_after_close(self):
# Similar to test_delayed_close_callback, but read_until_close takes
# a separate code path so test it separately.
server, client = self.make_iostream_pair()
try:
server.write(b"1234")
server.close()
# Read one byte to make sure the client has received the data.
# It won't run the close callback as long as there is more buffered
# data that could satisfy a later read.
client.read_bytes(1, self.stop)
data = self.wait()
self.assertEqual(data, b"1")
client.read_until_close(self.stop)
data = self.wait()
self.assertEqual(data, b"234")
finally:
server.close()
client.close()
def test_streaming_read_until_close_after_close(self):
# Same as the preceding test but with a streaming_callback.
# All data should go through the streaming callback,
# and the final read callback just gets an empty string.
server, client = self.make_iostream_pair()
try:
server.write(b"1234")
server.close()
client.read_bytes(1, self.stop)
data = self.wait()
self.assertEqual(data, b"1")
streaming_data = []
client.read_until_close(self.stop,
streaming_callback=streaming_data.append)
data = self.wait()
self.assertEqual(b'', data)
self.assertEqual(b''.join(streaming_data), b"234")
finally:
server.close()
client.close()
def test_large_read_until(self):
# Performance test: read_until used to have a quadratic component
# so a read_until of 4MB would take 8 seconds; now it takes 0.25
# seconds.
server, client = self.make_iostream_pair()
try:
# This test fails on pypy with ssl. I think it's because
            # pypy's gc moves objects around, breaking the
# "frozen write buffer" assumption.
if (isinstance(server, SSLIOStream) and
platform.python_implementation() == 'PyPy'):
raise unittest.SkipTest(
"pypy gc causes problems with openssl")
NUM_KB = 4096
for i in range(NUM_KB):
client.write(b"A" * 1024)
client.write(b"\r\n")
server.read_until(b"\r\n", self.stop)
data = self.wait()
self.assertEqual(len(data), NUM_KB * 1024 + 2)
finally:
server.close()
client.close()
def test_close_callback_with_pending_read(self):
# Regression test for a bug that was introduced in 2.3
# where the IOStream._close_callback would never be called
# if there were pending reads.
OK = b"OK\r\n"
server, client = self.make_iostream_pair()
client.set_close_callback(self.stop)
try:
server.write(OK)
client.read_until(b"\r\n", self.stop)
res = self.wait()
self.assertEqual(res, OK)
server.close()
client.read_until(b"\r\n", lambda x: x)
# If _close_callback (self.stop) is not called,
# an AssertionError: Async operation timed out after 5 seconds
# will be raised.
res = self.wait()
self.assertTrue(res is None)
finally:
server.close()
client.close()
@skipIfNonUnix
def test_inline_read_error(self):
# An error on an inline read is raised without logging (on the
# assumption that it will eventually be noticed or logged further
# up the stack).
#
# This test is posix-only because windows os.close() doesn't work
# on socket FDs, but we can't close the socket object normally
# because we won't get the error we want if the socket knows
# it's closed.
server, client = self.make_iostream_pair()
try:
os.close(server.socket.fileno())
with self.assertRaises(socket.error):
server.read_bytes(1, lambda data: None)
finally:
server.close()
client.close()
def test_async_read_error_logging(self):
# Socket errors on asynchronous reads should be logged (but only
# once).
server, client = self.make_iostream_pair()
server.set_close_callback(self.stop)
try:
# Start a read that will be fulfilled asynchronously.
server.read_bytes(1, lambda data: None)
client.write(b'a')
# Stub out read_from_fd to make it fail.
def fake_read_from_fd():
os.close(server.socket.fileno())
server.__class__.read_from_fd(server)
server.read_from_fd = fake_read_from_fd
# This log message is from _handle_read (not read_from_fd).
with ExpectLog(gen_log, "error on read"):
self.wait()
finally:
server.close()
client.close()
def test_future_close_callback(self):
# Regression test for interaction between the Future read interfaces
# and IOStream._maybe_add_error_listener.
server, client = self.make_iostream_pair()
closed = [False]
def close_callback():
closed[0] = True
self.stop()
server.set_close_callback(close_callback)
try:
client.write(b'a')
future = server.read_bytes(1)
self.io_loop.add_future(future, self.stop)
self.assertEqual(self.wait().result(), b'a')
self.assertFalse(closed[0])
client.close()
self.wait()
self.assertTrue(closed[0])
finally:
server.close()
client.close()
def test_read_bytes_partial(self):
server, client = self.make_iostream_pair()
try:
# Ask for more than is available with partial=True
client.read_bytes(50, self.stop, partial=True)
server.write(b"hello")
data = self.wait()
self.assertEqual(data, b"hello")
# Ask for less than what is available; num_bytes is still
# respected.
client.read_bytes(3, self.stop, partial=True)
server.write(b"world")
data = self.wait()
self.assertEqual(data, b"wor")
# Partial reads won't return an empty string, but read_bytes(0)
# will.
client.read_bytes(0, self.stop, partial=True)
data = self.wait()
self.assertEqual(data, b'')
finally:
server.close()
client.close()
def test_read_until_max_bytes(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Extra room under the limit
client.read_until(b"def", self.stop, max_bytes=50)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
# Just enough space
client.read_until(b"def", self.stop, max_bytes=6)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
            # Not enough space, but we don't know it until the data arrives;
            # all we can do is log a warning and close the connection.
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until(b"def", self.stop, max_bytes=5)
server.write(b"123456")
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_max_bytes_inline(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Similar to the error case in the previous test, but the
# server writes first so client reads are satisfied
# inline. For consistency with the out-of-line case, we
# do not raise the error synchronously.
server.write(b"123456")
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_max_bytes_ignores_extra(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
            # Even though data that matches arrives in the same packet that
# puts us over the limit, we fail the request because it was not
# found within the limit.
server.write(b"abcdef")
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_regex_max_bytes(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Extra room under the limit
client.read_until_regex(b"def", self.stop, max_bytes=50)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
# Just enough space
client.read_until_regex(b"def", self.stop, max_bytes=6)
server.write(b"abcdef")
data = self.wait()
self.assertEqual(data, b"abcdef")
            # Not enough space, but we don't know it until the data arrives;
            # all we can do is log a warning and close the connection.
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until_regex(b"def", self.stop, max_bytes=5)
server.write(b"123456")
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_regex_max_bytes_inline(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
# Similar to the error case in the previous test, but the
# server writes first so client reads are satisfied
# inline. For consistency with the out-of-line case, we
# do not raise the error synchronously.
server.write(b"123456")
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until_regex(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_read_until_regex_max_bytes_ignores_extra(self):
server, client = self.make_iostream_pair()
client.set_close_callback(lambda: self.stop("closed"))
try:
            # Even though data that matches arrives in the same packet that
# puts us over the limit, we fail the request because it was not
# found within the limit.
server.write(b"abcdef")
with ExpectLog(gen_log, "Unsatisfiable read"):
client.read_until_regex(b"def", self.stop, max_bytes=5)
data = self.wait()
self.assertEqual(data, "closed")
finally:
server.close()
client.close()
def test_small_reads_from_large_buffer(self):
# 10KB buffer size, 100KB available to read.
# Read 1KB at a time and make sure that the buffer is not eagerly
# filled.
server, client = self.make_iostream_pair(max_buffer_size=10 * 1024)
try:
server.write(b"a" * 1024 * 100)
for i in range(100):
client.read_bytes(1024, self.stop)
data = self.wait()
self.assertEqual(data, b"a" * 1024)
finally:
server.close()
client.close()
def test_small_read_untils_from_large_buffer(self):
# 10KB buffer size, 100KB available to read.
# Read 1KB at a time and make sure that the buffer is not eagerly
# filled.
server, client = self.make_iostream_pair(max_buffer_size=10 * 1024)
try:
server.write((b"a" * 1023 + b"\n") * 100)
for i in range(100):
client.read_until(b"\n", self.stop, max_bytes=4096)
data = self.wait()
self.assertEqual(data, b"a" * 1023 + b"\n")
finally:
server.close()
client.close()
def test_flow_control(self):
MB = 1024 * 1024
server, client = self.make_iostream_pair(max_buffer_size=5 * MB)
try:
# Client writes more than the server will accept.
client.write(b"a" * 10 * MB)
# The server pauses while reading.
server.read_bytes(MB, self.stop)
self.wait()
self.io_loop.call_later(0.1, self.stop)
self.wait()
# The client's writes have been blocked; the server can
# continue to read gradually.
for i in range(9):
server.read_bytes(MB, self.stop)
self.wait()
finally:
server.close()
client.close()
class TestIOStreamWebHTTP(TestIOStreamWebMixin, AsyncHTTPTestCase):
def _make_client_iostream(self):
return IOStream(socket.socket(), io_loop=self.io_loop)
class TestIOStreamWebHTTPS(TestIOStreamWebMixin, AsyncHTTPSTestCase):
def _make_client_iostream(self):
return SSLIOStream(socket.socket(), io_loop=self.io_loop,
ssl_options=dict(cert_reqs=ssl.CERT_NONE))
class TestIOStream(TestIOStreamMixin, AsyncTestCase):
def _make_server_iostream(self, connection, **kwargs):
return IOStream(connection, **kwargs)
def _make_client_iostream(self, connection, **kwargs):
return IOStream(connection, **kwargs)
class TestIOStreamSSL(TestIOStreamMixin, AsyncTestCase):
def _make_server_iostream(self, connection, **kwargs):
connection = ssl.wrap_socket(connection,
server_side=True,
do_handshake_on_connect=False,
**_server_ssl_options())
return SSLIOStream(connection, io_loop=self.io_loop, **kwargs)
def _make_client_iostream(self, connection, **kwargs):
return SSLIOStream(connection, io_loop=self.io_loop,
ssl_options=dict(cert_reqs=ssl.CERT_NONE),
**kwargs)
# This will run some tests that are basically redundant but it's the
# simplest way to make sure that it works to pass an SSLContext
# instead of an ssl_options dict to the SSLIOStream constructor.
@unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present')
class TestIOStreamSSLContext(TestIOStreamMixin, AsyncTestCase):
def _make_server_iostream(self, connection, **kwargs):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(
os.path.join(os.path.dirname(__file__), 'test.crt'),
os.path.join(os.path.dirname(__file__), 'test.key'))
connection = ssl_wrap_socket(connection, context,
server_side=True,
do_handshake_on_connect=False)
return SSLIOStream(connection, io_loop=self.io_loop, **kwargs)
def _make_client_iostream(self, connection, **kwargs):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
return SSLIOStream(connection, io_loop=self.io_loop,
ssl_options=context, **kwargs)
class TestIOStreamStartTLS(AsyncTestCase):
def setUp(self):
try:
super(TestIOStreamStartTLS, self).setUp()
self.listener, self.port = bind_unused_port()
self.server_stream = None
self.server_accepted = Future()
netutil.add_accept_handler(self.listener, self.accept)
self.client_stream = IOStream(socket.socket())
self.io_loop.add_future(self.client_stream.connect(
('127.0.0.1', self.port)), self.stop)
self.wait()
self.io_loop.add_future(self.server_accepted, self.stop)
self.wait()
except Exception as e:
print(e)
raise
def tearDown(self):
if self.server_stream is not None:
self.server_stream.close()
if self.client_stream is not None:
self.client_stream.close()
self.listener.close()
super(TestIOStreamStartTLS, self).tearDown()
def accept(self, connection, address):
if self.server_stream is not None:
self.fail("should only get one connection")
self.server_stream = IOStream(connection)
self.server_accepted.set_result(None)
@gen.coroutine
def client_send_line(self, line):
self.client_stream.write(line)
recv_line = yield self.server_stream.read_until(b"\r\n")
self.assertEqual(line, recv_line)
@gen.coroutine
def server_send_line(self, line):
self.server_stream.write(line)
recv_line = yield self.client_stream.read_until(b"\r\n")
self.assertEqual(line, recv_line)
def client_start_tls(self, ssl_options=None, server_hostname=None):
client_stream = self.client_stream
self.client_stream = None
return client_stream.start_tls(False, ssl_options, server_hostname)
def server_start_tls(self, ssl_options=None):
server_stream = self.server_stream
self.server_stream = None
return server_stream.start_tls(True, ssl_options)
@gen_test
def test_start_tls_smtp(self):
# This flow is simplified from RFC 3207 section 5.
# We don't really need all of this, but it helps to make sure
# that after realistic back-and-forth traffic the buffers end up
# in a sane state.
yield self.server_send_line(b"220 mail.example.com ready\r\n")
yield self.client_send_line(b"EHLO mail.example.com\r\n")
yield self.server_send_line(b"250-mail.example.com welcome\r\n")
yield self.server_send_line(b"250 STARTTLS\r\n")
yield self.client_send_line(b"STARTTLS\r\n")
yield self.server_send_line(b"220 Go ahead\r\n")
client_future = self.client_start_tls(dict(cert_reqs=ssl.CERT_NONE))
server_future = self.server_start_tls(_server_ssl_options())
self.client_stream = yield client_future
self.server_stream = yield server_future
self.assertTrue(isinstance(self.client_stream, SSLIOStream))
self.assertTrue(isinstance(self.server_stream, SSLIOStream))
yield self.client_send_line(b"EHLO mail.example.com\r\n")
yield self.server_send_line(b"250 mail.example.com welcome\r\n")
@gen_test
def test_handshake_fail(self):
server_future = self.server_start_tls(_server_ssl_options())
# Certificates are verified with the default configuration.
client_future = self.client_start_tls(server_hostname="localhost")
with ExpectLog(gen_log, "SSL Error"):
with self.assertRaises(ssl.SSLError):
yield client_future
with self.assertRaises((ssl.SSLError, socket.error)):
yield server_future
@unittest.skipIf(not hasattr(ssl, 'create_default_context'),
'ssl.create_default_context not present')
@gen_test
def test_check_hostname(self):
# Test that server_hostname parameter to start_tls is being used.
# The check_hostname functionality is only available in python 2.7 and
# up and in python 3.4 and up.
server_future = self.server_start_tls(_server_ssl_options())
client_future = self.client_start_tls(
ssl.create_default_context(),
server_hostname=b'127.0.0.1')
with ExpectLog(gen_log, "SSL Error"):
with self.assertRaises(ssl.SSLError):
# The client fails to connect with an SSL error.
yield client_future
with self.assertRaises(Exception):
# The server fails to connect, but the exact error is unspecified.
yield server_future
class WaitForHandshakeTest(AsyncTestCase):
@gen.coroutine
def connect_to_server(self, server_cls):
server = client = None
try:
sock, port = bind_unused_port()
server = server_cls(ssl_options=_server_ssl_options())
server.add_socket(sock)
client = SSLIOStream(socket.socket(),
ssl_options=dict(cert_reqs=ssl.CERT_NONE))
yield client.connect(('127.0.0.1', port))
self.assertIsNotNone(client.socket.cipher())
finally:
if server is not None:
server.stop()
if client is not None:
client.close()
@gen_test
def test_wait_for_handshake_callback(self):
test = self
handshake_future = Future()
class TestServer(TCPServer):
def handle_stream(self, stream, address):
# The handshake has not yet completed.
test.assertIsNone(stream.socket.cipher())
self.stream = stream
stream.wait_for_handshake(self.handshake_done)
def handshake_done(self):
# Now the handshake is done and ssl information is available.
test.assertIsNotNone(self.stream.socket.cipher())
handshake_future.set_result(None)
yield self.connect_to_server(TestServer)
yield handshake_future
@gen_test
def test_wait_for_handshake_future(self):
test = self
handshake_future = Future()
class TestServer(TCPServer):
def handle_stream(self, stream, address):
test.assertIsNone(stream.socket.cipher())
test.io_loop.spawn_callback(self.handle_connection, stream)
@gen.coroutine
def handle_connection(self, stream):
yield stream.wait_for_handshake()
handshake_future.set_result(None)
yield self.connect_to_server(TestServer)
yield handshake_future
@gen_test
def test_wait_for_handshake_already_waiting_error(self):
test = self
handshake_future = Future()
class TestServer(TCPServer):
def handle_stream(self, stream, address):
stream.wait_for_handshake(self.handshake_done)
test.assertRaises(RuntimeError, stream.wait_for_handshake)
def handshake_done(self):
handshake_future.set_result(None)
yield self.connect_to_server(TestServer)
yield handshake_future
@gen_test
def test_wait_for_handshake_already_connected(self):
handshake_future = Future()
class TestServer(TCPServer):
def handle_stream(self, stream, address):
self.stream = stream
stream.wait_for_handshake(self.handshake_done)
def handshake_done(self):
self.stream.wait_for_handshake(self.handshake2_done)
def handshake2_done(self):
handshake_future.set_result(None)
yield self.connect_to_server(TestServer)
yield handshake_future
@skipIfNonUnix
class TestPipeIOStream(AsyncTestCase):
def test_pipe_iostream(self):
r, w = os.pipe()
rs = PipeIOStream(r, io_loop=self.io_loop)
ws = PipeIOStream(w, io_loop=self.io_loop)
ws.write(b"hel")
ws.write(b"lo world")
rs.read_until(b' ', callback=self.stop)
data = self.wait()
self.assertEqual(data, b"hello ")
rs.read_bytes(3, self.stop)
data = self.wait()
self.assertEqual(data, b"wor")
ws.close()
rs.read_until_close(self.stop)
data = self.wait()
self.assertEqual(data, b"ld")
rs.close()
def test_pipe_iostream_big_write(self):
r, w = os.pipe()
rs = PipeIOStream(r, io_loop=self.io_loop)
ws = PipeIOStream(w, io_loop=self.io_loop)
NUM_BYTES = 1048576
# Write 1MB of data, which should fill the buffer
ws.write(b"1" * NUM_BYTES)
rs.read_bytes(NUM_BYTES, self.stop)
data = self.wait()
self.assertEqual(data, b"1" * NUM_BYTES)
ws.close()
rs.close()
| apache-2.0 |
j831/zulip | analytics/migrations/0001_initial.py | 40 | 5095 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import zerver.lib.str_utils
class Migration(migrations.Migration):
dependencies = [
('zerver', '0030_realm_org_type'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Anomaly',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('info', models.CharField(max_length=1000)),
],
bases=(zerver.lib.str_utils.ModelReprMixin, models.Model),
),
migrations.CreateModel(
name='HuddleCount',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('huddle', models.ForeignKey(to='zerver.Recipient')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('property', models.CharField(max_length=40)),
('end_time', models.DateTimeField()),
('interval', models.CharField(max_length=20)),
('value', models.BigIntegerField()),
('anomaly', models.ForeignKey(to='analytics.Anomaly', null=True)),
],
bases=(zerver.lib.str_utils.ModelReprMixin, models.Model),
),
migrations.CreateModel(
name='InstallationCount',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('property', models.CharField(max_length=40)),
('end_time', models.DateTimeField()),
('interval', models.CharField(max_length=20)),
('value', models.BigIntegerField()),
('anomaly', models.ForeignKey(to='analytics.Anomaly', null=True)),
],
bases=(zerver.lib.str_utils.ModelReprMixin, models.Model),
),
migrations.CreateModel(
name='RealmCount',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('realm', models.ForeignKey(to='zerver.Realm')),
('property', models.CharField(max_length=40)),
('end_time', models.DateTimeField()),
('interval', models.CharField(max_length=20)),
('value', models.BigIntegerField()),
('anomaly', models.ForeignKey(to='analytics.Anomaly', null=True)),
],
bases=(zerver.lib.str_utils.ModelReprMixin, models.Model),
),
migrations.CreateModel(
name='StreamCount',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('realm', models.ForeignKey(to='zerver.Realm')),
('stream', models.ForeignKey(to='zerver.Stream')),
('property', models.CharField(max_length=40)),
('end_time', models.DateTimeField()),
('interval', models.CharField(max_length=20)),
('value', models.BigIntegerField()),
('anomaly', models.ForeignKey(to='analytics.Anomaly', null=True)),
],
bases=(zerver.lib.str_utils.ModelReprMixin, models.Model),
),
migrations.CreateModel(
name='UserCount',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('realm', models.ForeignKey(to='zerver.Realm')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('property', models.CharField(max_length=40)),
('end_time', models.DateTimeField()),
('interval', models.CharField(max_length=20)),
('value', models.BigIntegerField()),
('anomaly', models.ForeignKey(to='analytics.Anomaly', null=True)),
],
bases=(zerver.lib.str_utils.ModelReprMixin, models.Model),
),
migrations.AlterUniqueTogether(
name='usercount',
unique_together=set([('user', 'property', 'end_time', 'interval')]),
),
migrations.AlterUniqueTogether(
name='streamcount',
unique_together=set([('stream', 'property', 'end_time', 'interval')]),
),
migrations.AlterUniqueTogether(
name='realmcount',
unique_together=set([('realm', 'property', 'end_time', 'interval')]),
),
migrations.AlterUniqueTogether(
name='installationcount',
unique_together=set([('property', 'end_time', 'interval')]),
),
migrations.AlterUniqueTogether(
name='huddlecount',
unique_together=set([('huddle', 'property', 'end_time', 'interval')]),
),
]
| apache-2.0 |
tobiasgehring/qudi | hardware/microwave/mw_source_anritsu.py | 1 | 13860 | # -*- coding: utf-8 -*-
"""
This file contains the Qudi hardware file to control Anritsu Microwave Device.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Parts of this file were developed from a PI3diamond module which is
Copyright (C) 2009 Helmut Rathgen <[email protected]>
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
import visa
import time
import numpy as np
from core.base import Base
from interface.microwave_interface import MicrowaveInterface
from interface.microwave_interface import MicrowaveLimits
from interface.microwave_interface import MicrowaveMode
from interface.microwave_interface import TriggerEdge
class MicrowaveAnritsu(Base, MicrowaveInterface):
""" Hardware controlfile for Anritsu Devices. """
_modclass = 'MicrowaveAnritsu'
_modtype = 'hardware'
def on_activate(self):
""" Initialisation performed during activation of the module.
"""
# checking for the right configuration
config = self.getConfiguration()
if 'gpib_address' in config.keys():
self._gpib_address = config['gpib_address']
else:
self.log.error('This is MWanritsu: did not find >>gpib_address<< '
                           'in configuration.')
if 'gpib_timeout' in config.keys():
self._gpib_timeout = int(config['gpib_timeout'])
else:
self._gpib_timeout = 10
self.log.error('This is MWanritsu: did not find >>gpib_timeout<< '
                           'in configuration. I will set it to 10 seconds.')
# trying to load the visa connection to the module
self.rm = visa.ResourceManager()
try:
self._gpib_connection = self.rm.open_resource(self._gpib_address,
timeout=self._gpib_timeout*1000)
except:
self.log.error('This is MWanritsu: could not connect to the GPIB '
'address >>{}<<.'.format(self._gpib_address))
raise
self.model = self._gpib_connection.query('*IDN?').split(',')[1]
self.log.info('MicrowaveAnritsu initialised and connected to '
'hardware.')
def on_deactivate(self):
""" Deinitialisation performed during deactivation of the module.
"""
self._gpib_connection.close()
self.rm.close()
def _command_wait(self, command_str):
"""
Writes the command in command_str via GPIB and waits until the device has finished
processing it.
@param command_str: The command to be written
"""
self._gpib_connection.write(command_str)
self._gpib_connection.write('*WAI')
while int(float(self._gpib_connection.query('*OPC?'))) != 1:
time.sleep(0.2)
return
def get_limits(self):
""" Right now, this is for Anritsu MG37022A with Option 4 only."""
limits = MicrowaveLimits()
limits.supported_modes = (MicrowaveMode.CW, MicrowaveMode.LIST, MicrowaveMode.SWEEP)
limits.min_frequency = 10e6
limits.max_frequency = 20e9
limits.min_power = -105
limits.max_power = 30
limits.list_minstep = 0.001
limits.list_maxstep = 20e9
limits.list_maxentries = 10001
limits.sweep_minstep = 0.001
limits.sweep_maxstep = 20e9
limits.sweep_maxentries = 10001
return limits
def off(self):
"""
Switches off any microwave output.
Must return AFTER the device is actually stopped.
@return int: error code (0:OK, -1:error)
"""
self._gpib_connection.write('OUTP:STAT OFF')
while int(float(self._gpib_connection.query('OUTP:STAT?'))) != 0:
time.sleep(0.2)
return 0
def get_status(self):
"""
Gets the current status of the MW source, i.e. the mode (cw, list or sweep) and
the output state (stopped, running)
@return str, bool: mode ['cw', 'list', 'sweep'], is_running [True, False]
"""
is_running = bool(int(float(self._gpib_connection.query('OUTP:STAT?'))))
mode = self._gpib_connection.query(':FREQ:MODE?').strip('\n').lower()
if mode == 'swe':
mode = 'sweep'
return mode, is_running
def get_power(self):
"""
Gets the microwave output power.
@return float: the power set at the device in dBm
"""
return float(self._gpib_connection.query(':POW?'))
def get_frequency(self):
"""
Gets the frequency of the microwave output.
Returns single float value if the device is in cw mode.
Returns list like [start, stop, step] if the device is in sweep mode.
Returns list of frequencies if the device is in list mode.
@return [float, list]: frequency(s) currently set for this device in Hz
"""
mode, is_running = self.get_status()
if 'cw' in mode:
return_val = float(self._gpib_connection.query(':FREQ?'))
elif 'sweep' in mode:
start = float(self._gpib_connection.query(':FREQ:STAR?'))
stop = float(self._gpib_connection.query(':FREQ:STOP?'))
step = float(self._gpib_connection.query(':SWE:FREQ:STEP?'))
return_val = [start+step, stop, step]
elif 'list' in mode:
stop_index = int(float(self._gpib_connection.query(':LIST:STOP?')))
self._gpib_connection.write(':LIST:IND {0:d}'.format(stop_index))
stop = float(self._gpib_connection.query(':LIST:FREQ?'))
self._gpib_connection.write(':LIST:IND 0')
start = float(self._gpib_connection.query(':LIST:FREQ?'))
step = (stop - start) / (stop_index-1)
return_val = np.arange(start, stop + step, step)
return return_val
def cw_on(self):
""" Switches on any preconfigured microwave output.
@return int: error code (0:OK, -1:error)
"""
mode, is_running = self.get_status()
if is_running:
if mode == 'cw':
return 0
else:
self.off()
if mode != 'cw':
self._command_wait(':FREQ:MODE CW')
self._gpib_connection.write(':OUTP:STAT ON')
dummy, is_running = self.get_status()
while not is_running:
time.sleep(0.2)
dummy, is_running = self.get_status()
return 0
def set_cw(self, frequency=None, power=None):
"""
Configures the device for cw-mode and optionally sets frequency and/or power
@param float frequency: frequency to set in Hz
@param float power: power to set in dBm
        @return float, float, str: current frequency in Hz, current power in dBm, current mode
"""
mode, is_running = self.get_status()
if is_running:
self.off()
if mode != 'cw':
self._command_wait(':FREQ:MODE CW')
if frequency is not None:
self._command_wait(':FREQ {0:f}'.format(frequency))
if power is not None:
self._command_wait(':POW {0:f}'.format(power))
mode, dummy = self.get_status()
actual_freq = self.get_frequency()
actual_power = self.get_power()
return actual_freq, actual_power, mode
def list_on(self):
"""
Switches on the list mode microwave output.
Must return AFTER the device is actually running.
@return int: error code (0:OK, -1:error)
"""
mode, is_running = self.get_status()
if is_running:
if mode == 'list':
return 0
else:
self.off()
if mode != 'list':
self._command_wait(':FREQ:MODE LIST')
self._gpib_connection.write(':OUTP:STAT ON')
dummy, is_running = self.get_status()
while not is_running:
time.sleep(0.2)
dummy, is_running = self.get_status()
return 0
def set_list(self, frequency=None, power=None):
"""
Configures the device for list-mode and optionally sets frequencies and/or power
@param list frequency: list of frequencies in Hz
@param float power: MW power of the frequency list in dBm
@return list, float, str: current frequencies in Hz, current power in dBm, current mode
"""
mode, is_running = self.get_status()
if is_running:
self.off()
if mode != 'list':
self._command_wait(':FREQ:MODE LIST')
self._gpib_connection.write(':LIST:TYPE FREQ')
self._gpib_connection.write(':LIST:IND 0')
if frequency is not None:
            # The first frequency is deliberately written twice so that the
            # list holds len(frequency) + 1 entries, matching :LIST:STOP below.
            s = ' {0:f},'.format(frequency[0])
            for f in frequency[:-1]:
                s += ' {0:f},'.format(f)
            s += ' {0:f}'.format(frequency[-1])
self._gpib_connection.write(':LIST:FREQ' + s)
self._gpib_connection.write(':LIST:STAR 0')
self._gpib_connection.write(':LIST:STOP {0:d}'.format(len(frequency)))
self._gpib_connection.write(':LIST:MODE MAN')
self._gpib_connection.write('*WAI')
self._command_wait(':LIST:IND 0')
if power is not None:
self._command_wait(':POW {0:f}'.format(power))
self._command_wait(':TRIG:SOUR EXT')
actual_power = self.get_power()
actual_freq = self.get_frequency()
mode, dummy = self.get_status()
return actual_freq, actual_power, mode
def reset_listpos(self):
"""
Reset of MW list mode position to start (first frequency step)
@return int: error code (0:OK, -1:error)
"""
self._command_wait(':LIST:IND 0')
return 0
def sweep_on(self):
""" Switches on the sweep mode.
@return int: error code (0:OK, -1:error)
"""
mode, is_running = self.get_status()
if is_running:
if mode == 'sweep':
return 0
else:
self.off()
if mode != 'sweep':
self._command_wait(':FREQ:MODE SWEEP')
self._gpib_connection.write(':OUTP:STAT ON')
dummy, is_running = self.get_status()
while not is_running:
time.sleep(0.2)
dummy, is_running = self.get_status()
return 0
def set_sweep(self, start=None, stop=None, step=None, power=None):
"""
Configures the device for sweep-mode and optionally sets frequency start/stop/step
and/or power
@return float, float, float, float, str: current start frequency in Hz,
current stop frequency in Hz,
current frequency step in Hz,
current power in dBm,
current mode
"""
mode, is_running = self.get_status()
if is_running:
self.off()
if mode != 'sweep':
self._command_wait(':FREQ:MODE SWEEP')
self._gpib_connection.write(':SWE:GEN STEP')
self._gpib_connection.write('*WAI')
if (start is not None) and (stop is not None) and (step is not None):
self._gpib_connection.write(':FREQ:START {0}'.format(start - step))
self._gpib_connection.write(':FREQ:STOP {0}'.format(stop))
self._gpib_connection.write(':SWE:FREQ:STEP {0}'.format(step))
self._gpib_connection.write('*WAI')
if power is not None:
self._command_wait(':POW {0:f}'.format(power))
self._command_wait(':TRIG:SOUR EXT')
actual_power = self.get_power()
freq_list = self.get_frequency()
mode, dummy = self.get_status()
return freq_list[0], freq_list[1], freq_list[2], actual_power, mode
def reset_sweeppos(self):
"""
Reset of MW sweep mode position to start (start frequency)
@return int: error code (0:OK, -1:error)
"""
self._command_wait(':ABORT')
return 0
def set_ext_trigger(self, pol=TriggerEdge.RISING):
""" Set the external trigger for this device with proper polarization.
@param TriggerEdge pol: polarisation of the trigger (basically rising edge or falling edge)
@return object: current trigger polarity [TriggerEdge.RISING, TriggerEdge.FALLING]
"""
if pol == TriggerEdge.RISING:
edge = 'POS'
elif pol == TriggerEdge.FALLING:
edge = 'NEG'
else:
self.log.warning('No valid trigger polarity passed to microwave hardware module.')
edge = None
if edge is not None:
self._command_wait(':TRIG:SEQ3:SLOP {0}'.format(edge))
polarity = self._gpib_connection.query(':TRIG:SEQ3:SLOPE?')
if 'NEG' in polarity:
return TriggerEdge.FALLING
else:
return TriggerEdge.RISING
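# Illustrative usage sketch (not part of the original driver): a typical call
# sequence from a logic module once Qudi has activated this hardware module.
# `mw` stands for an activated MicrowaveAnritsu instance; the frequency and
# power values are made-up examples within the ranges reported by get_limits().
#
#   limits = mw.get_limits()
#   freq, power, mode = mw.set_cw(frequency=2.87e9, power=-20.0)
#   mw.cw_on()
#   ...
#   mw.off()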
| gpl-3.0 |
CDSP/Marvin | marvinconfig.py | 1 | 2505 | # -*- coding: utf-8 -*-
#makes the console very verbose
DEBUG = False
#the CSV and text outputs can both be enabled at the same time
CONFIG = {
    'details_CSV' : True,  #enable or disable the details as a CSV file
    'details_text' : True,  #enable or disable the details as a text file
}
IMAGE_NAMING_PATTERN = {
'separator' : '_',
'page_number_location' : -2,
}
#table of the metadata to fetch and the expected values
#the supported operations are: 'exact', 'get', 'interval', 'exist'
#'exact' : returns true if the value is exactly the value given in the 'value' field, and false otherwise
#'get' : returns the value of the field
#'interval' : returns true if the value is a number between the 'start' and 'end' values (which must also be numbers), and false otherwise
#'exist' : returns true if the value exists and is not an empty string, and false otherwise
#(an illustrative evaluation sketch follows the METADATA_TO_READ table below)
METADATA_TO_READ = {
    #the fileName attribute is mandatory, all the others are optional
'fileName' : {
'realPath' : '/jpylyzer/fileInfo/fileName',
'operation' : 'get'
},
'isValidJP2' : {
'realPath' : '/jpylyzer/isValidJP2',
'splitedPath' : ['isValidJP2'],
'operation' : 'exact',
'value' : 'True'
},
'imageHeaderBoxc' : {
'realPath' : '/jpylyzer/properties/jp2HeaderBox/imageHeaderBox/c',
'operation' : 'exact',
'value' : 'jpeg2000'
},
'colourSpecificationBoxMeth' : {
'realPath' : '/jpylyzer/properties/jp2HeaderBox/colourSpecificationBox/meth',
'operation' : 'exact',
'value' : 'Restricted ICC'
},
'iccDescription' : {
'realPath' : '/jpylyzer/properties/jp2HeaderBox/colourSpecificationBox/icc/description',
'operation' : 'exact',
'value' : 'ICC Adobe 98'
},
'vRescInPixelsPerMeter' : {
'realPath' : '/jpylyzer/properties/jp2HeaderBox/resolutionBox/captureResolutionBox/vRescInPixelsPerMeter',
'operation' : 'exist',
},
'hRescInPixelsPerMeter' : {
'realPath' : '/jpylyzer/properties/jp2HeaderBox/resolutionBox/captureResolutionBox/hRescInPixelsPerMeter',
'operation' : 'exist'
},
'transformation' : {
'realPath' : '/jpylyzer/properties/contiguousCodestreamBox/cod/transformation',
'operation' : 'exact',
'value' : '9-7 irreversible'
},
'compressionRatio' : {
'realPath' : '/jpylyzer/properties/compressionRatio',
'operation' : 'interval',
'start' : '8',
'end' : '12'
},
}
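# Illustrative sketch (not part of the original configuration): one way a
# checker could evaluate an entry of METADATA_TO_READ against a value read
# from the jpylyzer output. This only documents the meaning of the
# 'operation' field; Marvin's real implementation may differ.
def _evaluate_entry_example(entry, value):
    operation = entry['operation']
    if operation == 'get':
        return value
    if operation == 'exact':
        return value == entry['value']
    if operation == 'exist':
        return value is not None and value != ''
    if operation == 'interval':
        return float(entry['start']) <= float(value) <= float(entry['end'])
    return False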
#ignored problems are skipped when building the report, but they are still added to the details CSV
IGNORE_PROBLEM = ['fileName','iccDescription'] | gpl-3.0 |
hackultura/django-logincidadao-provider | runtests.py | 1 | 1191 | import sys
try:
from django.conf import settings
from django.test.utils import get_runner
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="logincidadao_provider.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"logincidadao_provider",
],
SITE_ID=1,
MIDDLEWARE_CLASSES=(),
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
except ImportError:
import traceback
traceback.print_exc()
raise ImportError("To fix this error, run: pip install -r requirements-test.txt")
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(bool(failures))
if __name__ == '__main__':
run_tests(*sys.argv[1:])
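# Illustrative invocation (not part of the original file); the dotted test path
# is a made-up example:
#
#   python runtests.py
#   python runtests.py tests.test_models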
| bsd-3-clause |
OpenDroneMap/python-WebODM | webodm/models.py | 1 | 2597 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Models classes for returning results."""
class Project(object):
def __init__(self, id, tasks, created_at, name, description, permissions):
self.id = id
self.tasks = tasks
self.created_at = created_at
self.name = name
self.description = description
self.permissions = permissions
def __eq__(self, other):
return self.__dict__ == other.__dict__
@staticmethod
def from_dict(dct):
return Project(
id=dct['id'],
tasks=dct['tasks'],
created_at=dct['created_at'],
name=dct['name'],
description=dct['description'],
permissions=dct['permissions']
)
def as_project_list(dct):
return [Project.from_dict(x) for x in dct.get('results', [])]
class Task(object):
def __init__(
self, id, project, processing_node, images_count, available_assets,
uuid, name, processing_time, auto_processing_node, status,
last_error, options, ground_control_points, created_at,
pending_action):
self.id = id
self.project = project
self.processing_node = processing_node
self.images_count = images_count
self.available_assets = available_assets
self.uuid = uuid
self.name = name
self.processing_time = processing_time
self.auto_processing_node = auto_processing_node
self.status = status
self.last_error = last_error
self.options = options
self.ground_control_points = ground_control_points
self.created_at = created_at
self.pending_action = pending_action
def __eq__(self, other):
return self.__dict__ == other.__dict__
@staticmethod
def from_dict(dct):
return Task(
id=dct['id'],
project=dct['project'],
processing_node=dct['processing_node'],
images_count=dct['images_count'],
available_assets=dct['available_assets'],
uuid=dct['uuid'],
name=dct['name'],
processing_time=dct['processing_time'],
auto_processing_node=dct['auto_processing_node'],
status=dct['status'],
last_error=dct['last_error'],
options=dct['options'],
ground_control_points=dct['ground_control_points'],
created_at=dct['created_at'],
pending_action=dct['pending_action'],
)
def as_task_list(data):
return [Task.from_dict(x) for x in data]
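# Illustrative usage (not part of the original module): `payload` stands for
# already-decoded JSON from the WebODM API.
#
#   projects = as_project_list(payload)              # expects {"results": [...]}
#   first = Project.from_dict(payload["results"][0])
#   tasks = as_task_list(task_payload)               # expects a plain list of dicts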
| mit |
fhe-odoo/odoo | addons/website_mail/models/mail_thread.py | 338 | 1454 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
# TODO for trunk, remove me
class MailThread(osv.AbstractModel):
_inherit = 'mail.thread'
_columns = {
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', ('model', '=', self._name), ('type', '=', 'comment')
],
string='Website Messages',
help="Website communication history",
),
}
| agpl-3.0 |
datalogics-robb/scons | test/option/debug-objects.py | 2 | 1873 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the --debug=objects option works.
"""
import TestSCons
import sys
import string
import re
import time
test = TestSCons.TestSCons()
try:
import weakref
except ImportError:
x = "Python version has no 'weakref' module; skipping tests.\n"
test.skip_test(x)
test.write('SConstruct', """
def cat(target, source, env):
open(str(target[0]), 'wb').write(open(str(source[0]), 'rb').read())
env = Environment(BUILDERS={'Cat':Builder(action=Action(cat))})
env.Cat('file.out', 'file.in')
""")
test.write('file.in', "file.in\n")
# Just check that it runs, we're not overly concerned about the actual
# output at this point.
test.run(arguments = "--debug=objects")
test.pass_test()
| mit |
josrolgil/exjobbCalvin | calvin/utilities/attribute_resolver.py | 1 | 16606 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import copy
import re
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
# The order of the address fields
address_keys = ["country", "stateOrProvince", "locality", "street", "streetNumber", "building", "floor", "room"]
address_help = {"country": 'ISO 3166-1 alpha2 coded country name string',
"stateOrProvince": 'ISO 3166-2 coded sub-country name string',
"locality": 'Name of e.g. city',
"street": 'Street name',
"streetNumber": 'String or number for street number',
"building": 'Some buildings have names (maybe instead of number)',
"floor": 'String or number specifying floor of building',
"room": 'String or number specifying room or flat name'}
# The order of the owner fields
owner_keys = ["organization", "organizationalUnit", "role", "personOrGroup"]
owner_help = {"organization": '(reversed DNS) name of organisation',
"organizationalUnit": 'Sub-unit name',
"role": 'The title of owner e.g. "Site owner", "admin"',
"personOrGroup": 'The name of the owner(s), e.g. person name or responsible group name'}
# The order of the node name fields, purpose field could take values such as test, production, etc
node_name_keys = ["organization", "organizationalUnit", "purpose", "group", "name"]
node_name_help = {"organization": '(reversed DNS) name of organisation',
"organizationalUnit": 'Sub-unit name',
"purpose": 'If specific purpose of node, e.g. test, production',
"group": 'Name of node group e.g. "project" name',
"name": 'Name of node'}
attribute_docs = '''
# Calvin Node Attributes
The command line tool csruntime takes the node attribute data in JSON-coded form, either on the command line or as a file.
csruntime -n localhost --attr-file test_attr.json
csruntime -n localhost --attr '{"indexed_public": {"owner": {"personOrGroup": "Me"}}}'
The python functions start_node and dispatch_node takes the attributes as a python object.
The data structure is as follows:
{
"public": # Any public info stored in storage when requesting the node id
    { # TODO formalize these values, e.g. public key
},
"private": # Any configuration of the node that is NOT in storage only kept in node
{ # TODO formalize these values, e.g. private key
},
"indexed_public": # Any public info that is also searchable by the index, also in storage with node id
# The index is prefix searchable by higher level keywords. It is OK to skip levels.
# This list is formal and is intended to be extended as needed
{
'''
_indent_index = 20
_indent_index2 = _indent_index + 4
attribute_docs += ' ' * _indent_index + '"owner": {# The node\'s affilation\n'
attribute_docs += (',\n').join([' ' * _indent_index2 + '"' + a + '": ' + owner_help[a] for a in owner_keys]) + '\n' + ' ' * _indent_index + '},\n'
attribute_docs += ' ' * _indent_index + '"address": {# The node\'s (static) address\n'
attribute_docs += (',\n').join([' ' * _indent_index2 + '"' + a + '": ' + address_help[a] for a in address_keys]) + '\n' + ' ' * _indent_index + '},\n'
attribute_docs += ' ' * _indent_index + '"node_name": { # The node\'s static easy identification\n'
attribute_docs += (',\n').join([' ' * _indent_index2 + '"' + a + '": ' + node_name_help[a] for a in node_name_keys]) + '\n' + ' ' * _indent_index + '},\n'
attribute_docs += ' ' * _indent_index + '''"user_extra": {# Any user-specific extra attributes, as a list of lists with index words, not possible to skip levels
}
}
The public indexed values can be obtained by get_index function or the corresponding control API.
To format the index search string an attribute resolver function needs to be used:
from calvin.utilities.attribute_resolver import format_index_string
format_index_string(attr_obj, trim=True)
where the attr_obj should only contain ONE attribute e.g.
{"owner": {"organization": "org.testorg", "role": "admin"}}
alternatively the attr is a tuple e.g.:
("owner", {"organization": "org.testorg", "role": "admin"})
The trim parameter when true will remove trailing empty keys instead of leaving them empty, this allows the
prefix search to find nodes based only on the included higher level keys.
'''
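# Added illustration (not part of the original Calvin API): a concrete attributes
# dictionary matching the structure documented above. All values are invented for
# illustration purposes only.
def _example_attributes():
    """Return an example of the attribute structure accepted by AttributeResolver."""
    return {
        "indexed_public": {
            "owner": {"organization": "org.testorg", "personOrGroup": "me"},
            "address": {"country": "SE", "locality": "Lund"},
            "node_name": {"organization": "org.testorg", "purpose": "test", "name": "runtime-0"}
        },
        "public": {},
        "private": {}
    }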
# Country codes
countries = ["AD", "AE", "AF", "AG", "AI", "AL", "AM", "AO", "AQ", "AR", "AS", "AT", "AU", "AW", "AX", "AZ", "BA",
"BB", "BD", "BE", "BF", "BG", "BH", "BI", "BJ", "BL", "BM", "BN", "BO", "BQ", "BR", "BS", "BT", "BV",
"BW", "BY", "BZ", "CA", "CC", "CD", "CF", "CG", "CH", "CI", "CK", "CL", "CM", "CN", "CO", "CR", "CU",
"CV", "CW", "CX", "CY", "CZ", "DE", "DJ", "DK", "DM", "DO", "DZ", "EC", "EE", "EG", "EH", "ER", "ES",
"ET", "FI", "FJ", "FK", "FM", "FO", "FR", "GA", "GB", "GD", "GE", "GF", "GG", "GH", "GI", "GL", "GM",
"GN", "GP", "GQ", "GR", "GS", "GT", "GU", "GW", "GY", "HK", "HM", "HN", "HR", "HT", "HU", "ID", "IE",
"IL", "IM", "IN", "IO", "IQ", "IR", "IS", "IT", "JE", "JM", "JO", "JP", "KE", "KG", "KH", "KI", "KM",
"KN", "KP", "KR", "KW", "KY", "KZ", "LA", "LB", "LC", "LI", "LK", "LR", "LS", "LT", "LU", "LV", "LY",
"MA", "MC", "MD", "ME", "MF", "MG", "MH", "MK", "ML", "MM", "MN", "MO", "MP", "MQ", "MR", "MS", "MT",
"MU", "MV", "MW", "MX", "MY", "MZ", "NA", "NC", "NE", "NF", "NG", "NI", "NL", "NO", "NP", "NR", "NU",
"NZ", "OM", "PA", "PE", "PF", "PG", "PH", "PK", "PL", "PM", "PN", "PR", "PS", "PT", "PW", "PY", "QA",
"RE", "RO", "RS", "RU", "RW", "SA", "SB", "SC", "SD", "SE", "SG", "SH", "SI", "SJ", "SK", "SL", "SM",
"SN", "SO", "SR", "SS", "ST", "SV", "SX", "SY", "SZ", "TC", "TD", "TF", "TG", "TH", "TJ", "TK", "TL",
"TM", "TN", "TO", "TR", "TT", "TV", "TW", "TZ", "UA", "UG", "UM", "US", "UY", "UZ", "VA", "VC", "VE",
"VG", "VI", "VN", "VU", "WF", "WS", "YE", "YT", "ZA", "ZM", "ZW"]
class AttributeResolverHelper(object):
'''Resolves attributes'''
@staticmethod
def _to_unicode(value):
if isinstance(value, str):
return value.decode("UTF-8")
elif isinstance(value, unicode):
return value
else:
return str(value).decode("UTF-8")
@classmethod
    def owner_resolver(cls, attr):
        if not isinstance(attr, dict):
            raise Exception('Owner attribute must be a dictionary with %s keys.' % owner_keys)
        resolved = [cls._to_unicode(attr[k]) if k in attr.keys() else None for k in owner_keys]
        return resolved
    @classmethod
    def node_name_resolver(cls, attr):
        if not isinstance(attr, dict):
            raise Exception('Node name attribute must be a dictionary with %s keys.' % node_name_keys)
        resolved = [cls._to_unicode(attr[k]) if k in attr.keys() else None for k in node_name_keys]
        return resolved
    @classmethod
    def address_resolver(cls, attr):
        if not isinstance(attr, dict):
            raise Exception('Address attribute must be a dictionary with %s keys.' % address_keys)
        # country/stateOrProvince are address keys, so the ISO 3166 validation belongs here
        if "country" in attr:
            attr["country"] = attr["country"].upper()
            if attr["country"] not in countries:
                raise Exception("country must be ISO 3166-1 alpha2")
        if "stateOrProvince" in attr and "country" not in attr:
            raise Exception("country required for stateOrProvince, see ISO 3166-2 for proper code")
        resolved = [cls._to_unicode(attr[k]) if k in attr.keys() else None for k in address_keys]
        return resolved
@classmethod
def extra_resolver(cls, attr):
if isinstance(attr, list) and attr and isinstance(attr[0], list):
return attr
else:
raise Exception('User extra attribute must be a list of ordered attribute lists.')
@staticmethod
def encode_index(attr, as_list=False):
attr_str = '/node/attribute'
attr_list = [u'node', u'attribute']
for a in attr:
if a is None:
a = ''
else:
# Replace \ with \\, looks funny due to \ is used to escape characters in python also
a = a.replace('\\', '\\\\')
# Replace / with \/
a = a.replace('/', '\\/')
attr_str += '/' + a
attr_list.append(a)
return attr_list if as_list else attr_str
@staticmethod
def decode_index(attr_str):
if not attr_str.startswith('/node/attribute'):
raise Exception('Index %s not a node attribute' % attr_str)
attr_str = attr_str[len('/node/attribute') + 1:]
attr = re.split(r"(?<![^\\]\\)/", attr_str)
attr2 = []
for a in attr:
a = a.replace('\\/', '/')
a = a.replace('\\\\', '\\')
if a:
attr2.append(a)
else:
attr2.append(None)
return attr2
attr_resolver = {"owner": AttributeResolverHelper.owner_resolver,
"node_name": AttributeResolverHelper.node_name_resolver,
"address": AttributeResolverHelper.address_resolver,
"user_extra": AttributeResolverHelper.extra_resolver}
keys = {"owner": owner_keys,
"node_name": node_name_keys,
"address": address_keys}
def format_index_string(attr, trim=True):
''' To format the index search string an attribute resolver function needs to be used:
where the attr should only contain ONE attribute e.g.
{"owner": {"organization": "org.testorg", "role":"admin"}}
alternatively the attr is a tuple e.g.:
("owner", {"organization": "org.testorg", "role":"admin"})
The trim parameter, when true, will remove trailing empty keys instead of leaving them empty.
This allows the prefix search to find nodes based only on the included higher level keys.
'''
attr_type = None
attribute = None
if isinstance(attr, dict):
attr_type, attribute = attr.iteritems().next()
elif isinstance(attr, (list, tuple)):
attr_type, attribute = attr[0], attr[1]
_attr = attr_resolver[attr_type](attribute)
if trim:
_attr = [_attr[i] for i in range(len(_attr)) if any(_attr[i:])]
return AttributeResolverHelper.encode_index([attr_type] + _attr)
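# Added usage sketch (not part of the original module): the expected strings below
# follow from the encoding rules in this file and are illustrations, not documented
# Calvin behaviour.
def _format_index_string_example():
    # Skipped middle levels stay as empty path segments ...
    trimmed = format_index_string({"owner": {"organization": "org.testorg", "role": "admin"}})
    # ... expected: '/node/attribute/owner/org.testorg//admin' (trailing empty
    # 'personOrGroup' level removed because trim defaults to True)
    untrimmed = format_index_string({"owner": {"organization": "org.testorg", "role": "admin"}}, trim=False)
    # ... expected: '/node/attribute/owner/org.testorg//admin/' (trailing level kept)
    return trimmed, untrimmed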
class AttributeResolver(object):
'''Resolves incoming attributes for a node and verify it'''
def __init__(self, attr):
super(AttributeResolver, self).__init__()
self.attr = attr
if self.attr is None:
self.attr = {"indexed_public": {}, "public": {}, "private": {}}
else:
self.resolve()
def __str__(self):
return str(self.attr)
def resolve(self):
if not isinstance(self.attr, dict):
raise Exception('Attributes must be a dictionary with "public", "private" and "indexed_public" keys.')
self.attr["indexed_public"] = self.resolve_indexed_public(self.attr.get("indexed_public", None))
# TODO resolve public and private fields
if "public" not in self.attr:
self.attr["public"] = {}
if "private" not in self.attr:
self.attr["private"] = {}
def resolve_indexed_public(self, attr):
if attr:
for attr_type, attribute in attr.items():
try:
attr[attr_type] = attr_resolver[attr_type](attribute)
except Exception:
attr[attr_type] = []
return attr
else:
return {}
def _get_attribute_helper(self, indices, value):
if indices == []:
return value
else:
return self._get_attribute_helper(indices[1:], value[indices[0]])
def _get_attribute(self, index, which):
indices = [idx for idx in index.split("/") if idx] # remove extra /'s
try:
return self._get_attribute_helper(indices, self.attr[which])
except KeyError:
_log.warning("Warning: No such attribute '%r'" % (index,))
return {}
except:
_log.error("Error: Invalid attribute '%r'" % (index,))
def _has_attribute(self, index, which):
indices = [idx for idx in index.split("/") if idx] # remove extra /'s
try:
return self._get_attribute_helper(indices, self.attr[which]) or True
except KeyError:
return False
except:
_log.error("Error: Invalid attribute '%r'" % (index,))
def has_private_attribute(self, index):
return self._has_attribute(index, "private")
def has_public_attribute(self, index):
return self._has_attribute(index, "public")
def get_private(self, index=None):
if not index:
return self.attr["private"]
return self._get_attribute(index, "private")
def get_public(self, index=None):
if not index:
return self.attr["public"]
return self._get_attribute(index, "public")
def get_indexed_public(self, as_list=False):
# Return all indexes encoded for storage as a list of lists
return [AttributeResolverHelper.encode_index([AttributeResolverHelper._to_unicode(k)] + v, as_list=as_list) for k, v in self.attr["indexed_public"].items()]
def get_node_name_as_str(self):
"""
Generate a string corresponding to the attribute node name.
The sub-parts are concatenated by '+' to be able to use it as a filename.
"""
try:
return '+'.join(["" if i is None else i for i in self.attr['indexed_public']['node_name']])
except:
return None
def get_indexed_public_with_keys(self):
"""
Return a dictionary with all indexed_public attributes that have been set.
The attribute type (e.g. "owner") and the attribute name (e.g. "organization")
are concatenated using "." to form the key (e.g. "owner.organization").
"""
return {attr_type + "." + keys[attr_type][i]: value
for attr_type, value_list in self.attr["indexed_public"].iteritems()
for i, value in enumerate(value_list) if value is not None}
if __name__ == "__main__":
ar = AttributeResolver({"indexed_public": {
"address": {"country": "SE", "locality": "Lund", "street": u"Sölvegatan", "streetNumber": 53},
"owner": {"organization": u"ericsson.com", "organizationalUnit": "Ericsson Research", "personOrGroup": "CT"},
"node_name": {"organization": "ericsson.com", "purpose": "Test", "name": "alpha1"}}})
s = AttributeResolverHelper.encode_index(['a1', 'a2', 'a3'])
print s
print AttributeResolverHelper.decode_index(s)
s = AttributeResolverHelper.encode_index(['a/1', 'a\\2', 'ö3'])
print s
aa = AttributeResolverHelper.decode_index(s)
print aa, 'correct?', aa[2]
s = AttributeResolverHelper.encode_index(['a1/', '', 'a2\\', u'a3'])
print s
print AttributeResolverHelper.decode_index(s)
aa = ar.get_indexed_public(as_list=True)
print aa
print aa[1][6]
ar = AttributeResolver(None)
aa = ar.get_indexed_public(as_list=True)
print aa
print ar.resolve_indexed_public({"owner": {"organization": "org.testexample", "personOrGroup": "testOwner1"}})
print format_index_string({"owner": {"organization": "org.testexample", "personOrGroup": "testOwner1"}})
print ar.resolve_indexed_public({"owner": {"organization": "org.testexample"}})
print format_index_string({"owner": {"organization": "org.testexample"}})
print format_index_string({"owner": {}})
| apache-2.0 |
dangeratio/scrum | projects/migrations/0001_initial.py | 1 | 1883 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200)),
('detail', models.CharField(max_length=2000)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200)),
('detail', models.CharField(max_length=2000)),
],
),
migrations.CreateModel(
name='Release',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200)),
('detail', models.CharField(max_length=2000)),
('project_id', models.ForeignKey(to='projects.Project')),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200)),
('detail', models.CharField(max_length=2000)),
('item_id', models.ForeignKey(to='projects.Item')),
],
),
migrations.AddField(
model_name='item',
name='release_id',
field=models.ForeignKey(to='projects.Release'),
),
]
| gpl-2.0 |
OSBI/oodt | agility/oodt/query.py | 7 | 17979 | # encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE.txt file distributed with
# this work for additional information regarding copyright ownership. The ASF
# licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Agile OODT Query Expressions.
Query expressions in OODT are based on the "DIS" style expressions originally
developed for the Planetary Data System. They consist of
keyword/operator/literal-value triples, such as `targetName = Mars`, each
linked with logical operators (and, or, not) and grouped with parentheses.
For more information, see OODT_.
This module defines classes that model the aspects of a query. In general,
use the `Query` class, passing a string containing your keyword expression as
the first constructor argument. From there, you have a `Query` object you can
pass around to profile servers, product servers, and so forth.
.. _OODT: http://oodt.apache.org/
'''
__docformat__ = 'restructuredtext'
import oodterrors, shlex, xmlutils, xml.dom
from xmlutils import DocumentableField
class _QueryExpressionScanner(shlex.shlex):
'''Extend the shlex scanner but for the DIS-style query expressions we expect.
This means adding a dot to the characters that comprise a word so we can easily parse
floating-point numbers. Also, there's no comment character.
'''
def __init__(self, str):
'''Create scanner. `str` is the string to scan.
'''
shlex.shlex.__init__(self, str)
self.commenters = ''
self.wordchars = self.wordchars + '.-/:'
def get_token(self):
'''Get the next token. We strip quotes from strings, attach negative signs
to the numbers they're negating, and attach the = to <, >, and ! where needed.
'''
token = shlex.shlex.get_token(self)
if token == self.eof or token == None:
return None
if token[0] in self.quotes:
token = token[1:-1]
elif token in ('<', '>', '!'):
next = shlex.shlex.get_token(self)
if next == self.eof or next == None:
return None
elif next == '=':
token = token + next
else:
self.push_token(next)
return token
class QueryException(oodterrors.OODTException):
'''Exceptions related to query expression or query services
'''
pass
class ExpressionParseError(QueryException):
'''Error in parsing a query expression.
'''
pass
class QueryElement(xmlutils.Documentable):
'''An element of a query.
'''
def __init__(self, role='UNKNOWN', value='UNKNOWN', node=None):
'''Create a QueryElement. You can provide role and value settings, or provide an XML
DOM node which will be parsed for role/value.
'''
self.role, self.value = role, value
if node != None:
self.parse(node)
def getDocumentElementName(self):
'''Give the XML tag name: `queryElement`.
'''
return 'queryElement'
def getDocumentableFields(self):
'''Get the attributes that go into XML.
'''
return (DocumentableField('role', u'tokenRole', DocumentableField.SINGLE_VALUE_KIND),
DocumentableField('value', u'tokenValue', DocumentableField.SINGLE_VALUE_KIND))
def __repr__(self):
return 'QueryElement(role="%s",value="%s")' % (self.role, self.value)
class QueryHeader(xmlutils.Documentable):
'''Header of a query. Captures metadata like data dictionary in use, etc.
'''
def __init__(self, id='UNKNOWN', title='UNKNOWN', desc='UNKNOWN', type='QUERY', status='ACTIVE', security='UNKNOWN',
rev='2005-10-01 SCK v0.0.0 Under Development', dataDict='UNKNOWN', node=None):
'''Initialize a QueryHeader. Provide id, title, desc, type, status, security, rev,
and dataDict settings. Or, provide just an XML DOM node to be parsed.
'''
self.id, self.title, self.desc, self.type, self.status, self.security, self.rev, self.dataDict = \
id, title, desc, type, status, security, rev, dataDict
if node != None:
self.parse(node)
def getDocumentElementName(self):
'''Give the XML tag name: `queryAttributes`.
'''
return 'queryAttributes'
def getDocumentableFields(self):
'''Get the attributes that go into XML.
'''
return (DocumentableField('id', u'queryId', DocumentableField.SINGLE_VALUE_KIND),
DocumentableField('title', u'queryTitle', DocumentableField.SINGLE_VALUE_KIND),
DocumentableField('desc', u'queryDesc', DocumentableField.SINGLE_VALUE_KIND),
DocumentableField('type', u'queryType', DocumentableField.SINGLE_VALUE_KIND),
DocumentableField('status', u'queryStatusId', DocumentableField.SINGLE_VALUE_KIND),
DocumentableField('security', u'querySecurityType', DocumentableField.SINGLE_VALUE_KIND),
DocumentableField('rev', u'queryRevisionNote', DocumentableField.SINGLE_VALUE_KIND),
DocumentableField('dataDict', u'queryDataDictId', DocumentableField.SINGLE_VALUE_KIND))
def __repr__(self):
return 'QueryHeader(id="%s",title="%s",desc="%s",type="%s",status="%s",security="%s",rev="%s",dataDict="%s")' % (
self.id, self.title, self.desc, self.type, self.status, self.security, self.rev, self.dataDict
)
def __cmp__(self, other):
return cmp((self.id, self.title, self.desc, self.type, self.status, self.security, self.rev, self.dataDict),
(other.id, other.title, other.desc, other.type, other.status, other.security, other.rev, other.dataDict))
class QueryResult(object):
'''Result of a query.
'''
def __init__(self, results=[], node=None):
'''Results of a query are captured as a sequence of generic objects.
'''
self.results = results
if node != None: self.parse(node)
def parse(self, node):
'''Initialize this object from the given XML DOM `node`.
'''
if 'queryResultSet' != node.nodeName:
raise ValueError('Expected queryResultSet but got "%s"' % node.nodeName)
for child in node.childNodes:
if child.nodeType == xml.dom.Node.ELEMENT_NODE and 'resultElement' == child.nodeName:
self.results.append(Result(node=child))
def toXML(self, owner):
'''Convert this object into XML owned by the given `owner` document.
'''
root = owner.createElement('queryResultSet')
for result in self.results:
root.appendChild(result.toXML(owner))
return root
def clear(self):
'''Remove all results.
'''
self.results = []
def __getitem__(self, i):
return self.results[i]
def __len__(self):
return len(self.results)
def __cmp__(self, other):
return cmp(self.results, other.results)
_RELOPS = {
'LT': 'LT',
'lt': 'LT',
'<': 'LT',
'LE': 'LE',
'le': 'LE',
'<=': 'LE',
'EQ': 'EQ',
'eq': 'EQ',
'=': 'EQ',
'GE': 'GE',
'ge': 'GE',
'>=': 'GE',
'GT': 'GT',
'gt': 'GT',
'>': 'GT',
'NE': 'NE',
'ne': 'NE',
'!=': 'NE',
'LIKE': 'LIKE',
'like': 'LIKE',
'NOTLIKE': 'NOTLIKE',
'notlike': 'NOTLIKE',
'notLike': 'NOTLIKE',
'IS': 'IS',
    'is': 'IS',
    'ISNOT': 'ISNOT',
    'isnot': 'ISNOT',
    'isNot': 'ISNOT'
}
_LOGOPS = {
'AND': 'AND',
'and': 'AND',
'&': 'AND',
'OR': 'OR',
'or': 'OR',
'|': 'OR',
'NOT': 'NOT',
'not': 'NOT',
'!': 'NOT'
}
_PRECEDENCE = {
'NOT': 2,
'AND': 1,
'OR': 0,
}
class Query(object):
    '''Query. In old OODT, this was called XMLQuery, even though XML was tangential to it.
    Captures aspects of a query, including header, results, and so forth. Most importantly, it
    captures the query expression, which contains the constraints on the user's desiderata and
    the range of what to return.
As with other classes in this module, you can provide an XML DOM node to be parsed
for the query's settings, or provide each of them individually.
'''
def __init__(self, keywordQuery=None, header=QueryHeader(), resultModeID='ATTRIBUTE', propType='BROADCAST',
propLevels='N/A', maxResults=1, mimeAccept=[], parseQuery=True, node=None):
'''Initialize a query. Usually you provide just the `keywordQuery` argument
which should be a keyword/query expression in the DIS-style; or you provide
the `node` which is an XML DOM node describing a query.
'''
self.header, self.resultModeID, self.propType, self.propLevels, self.maxResults, self.mimeAccept = \
header, resultModeID, propType, propLevels, maxResults, mimeAccept
self.wheres, self.selects, self.froms, self.resultSet = [], [], [], QueryResult()
if keywordQuery != None:
self.keywordQuery = keywordQuery
if parseQuery:
self.wheres, self.selects = _parseQuery(keywordQuery)
else:
self.keywordQuery = ''
if node != None:
self.parse(node)
def toXML(self, owner):
'''Yield this query as an XML DOM.
'''
query = owner.createElement('query')
query.appendChild(self.header.toXML(owner))
xmlutils.add(query, u'queryResultModeId', self.resultModeID)
xmlutils.add(query, u'queryPropogationType', self.propType)
xmlutils.add(query, u'queryPropogationLevels', self.propLevels)
for mimeType in self.mimeAccept:
xmlutils.add(query, u'queryMimeAccept', mimeType)
xmlutils.add(query, u'queryMaxResults', str(self.maxResults))
xmlutils.add(query, u'queryKWQString', self.keywordQuery)
selects = owner.createElement(u'querySelectSet')
query.appendChild(selects)
for select in self.selects:
selects.appendChild(select.toXML(owner))
fromElement = owner.createElement(u'queryFromSet')
query.appendChild(fromElement)
for i in self.froms:
fromElement.appendChild(i.toXML(owner))
wheres = owner.createElement(u'queryWhereSet')
query.appendChild(wheres)
for where in self.wheres:
wheres.appendChild(where.toXML(owner))
query.appendChild(self.resultSet.toXML(owner))
return query
def parse(self, node):
'''Parse the XML DOM node as a query document.
'''
if 'query' != node.nodeName:
raise ValueError('Expected query but got "%s"' % node.nodeName)
self.mimeAccept, self.results = [], []
for child in node.childNodes:
if child.nodeType == xml.dom.Node.ELEMENT_NODE:
if child.nodeName == u'queryAttributes':
self.header = QueryHeader(node=child)
elif child.nodeName == u'resultModeID':
self.resultModeID = xmlutils.text(child)
elif child.nodeName == u'queryPropogationType':
self.propType = xmlutils.text(child)
elif child.nodeName == u'queryPropogationLevels':
self.propLevels = xmlutils.text(child)
elif child.nodeName == u'queryMimeAccept':
self.mimeAccept.append(xmlutils.text(child))
elif child.nodeName == u'queryMaxResults':
self.maxResults = int(xmlutils.text(child))
elif child.nodeName == u'queryKWQString':
self.keywordQuery = xmlutils.text(child)
elif child.nodeName == u'querySelectSet':
self.selects = _parseQueryElements(child)
elif child.nodeName == u'queryFromSet':
self.froms = _parseQueryElements(child)
elif child.nodeName == u'queryWhereSet':
self.wheres = _parseQueryElements(child)
elif child.nodeName == u'queryResultSet':
self.resultSet = QueryResult(node=child)
def __cmp__(self, other):
header = cmp(self.header, other.header)
if header < 0:
return -1
elif header == 0:
resultModeID = cmp(self.resultModeID, other.resultModeID)
if resultModeID < 0:
return -1
elif resultModeID == 0:
propType = cmp(self.propType, other.propType)
if propType < 0:
return -1
elif propType == 0:
propLevels = cmp(self.propLevels, other.propLevels)
if propLevels < 0:
return -1
elif propLevels == 0:
maxResults = self.maxResults - other.maxResults
if maxResults < 0:
return -1
elif maxResults == 0:
mimeAccept = cmp(self.mimeAccept, other.mimeAccept)
if mimeAccept < 0:
return -1
elif mimeAccept == 0:
selects = cmp(self.selects, other.selects)
if selects < 0:
return -1
elif selects == 0:
froms = cmp(self.froms, other.froms)
if froms < 0:
return -1
elif froms == 0:
wheres = cmp(self.wheres, other.wheres)
if wheres < 0:
return -1
elif wheres == 0:
return cmp(self.resultSet, other.resultSet)
return 1
def _parseQueryElements(node):
'''The children of the given XML DOM node are a sequence of queryElements. Parse them
and return a list of QueryElement objects.
'''
a = []
for child in node.childNodes:
if child.nodeType == xml.dom.Node.ELEMENT_NODE:
a.append(QueryElement(node=child))
return a
def _parseQuery(s):
'''Parse the query expression in `s`.
'''
if s is None:
return [], []
if len(s.strip()) == 0:
return [], []
if s.count('(') != s.count(')'):
raise ExpressionParseError('Unbalanced parentheses')
scanner = _QueryExpressionScanner(s)
return _buildQuery(scanner)
def _buildQuery(scanner):
'''Build the query stacks using the given `scanner`.
'''
operators, expression, selectors = [], [], []
while True:
token = scanner.get_token()
if token is None: break
if token in _LOGOPS:
op = QueryElement('LOGOP', _LOGOPS[token])
if len(operators) == 0:
operators.append(op)
else:
while len(operators) > 0 and _PRECEDENCE[operators[-1].value] > _PRECEDENCE[op.value]:
expression.append(operators.pop())
operators.append(op)
elif token == '(':
subExpr, subSelectors = _buildQuery(scanner)
expression.extend(subExpr)
selectors.extend(subSelectors)
elif token == ')':
break
else:
_addTerm(token, scanner, expression, operators, selectors)
if len(operators) > 0 and len(expression) == 0:
raise ExpressionParseError('Query contains only logical operators')
operators.reverse()
expression.extend(operators)
return expression, selectors
def _addTerm(elemName, scanner, expression, operators, selectors):
'''Add a term to the correct stack.
'''
relop = scanner.get_token()
if relop is None:
raise ExpressionParseError('Expected relational operator after element name "%s"' % elemName)
if relop not in _RELOPS:
raise ExpressionParseError('Unknown relational operator "%s"' % relop)
literal = scanner.get_token()
if literal is None:
raise ExpressionParseError('Expected literal value for "%s %s" comparison' % (elemName, relop))
if elemName == 'RETURN':
selectors.append(QueryElement('elemName', literal))
if len(operators) > 0:
operators.pop()
else:
scanner.get_token()
else:
expression.append(QueryElement('elemName', elemName))
expression.append(QueryElement('LITERAL', literal))
expression.append(QueryElement('RELOP', _RELOPS[relop]))
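# Added parsing sketch (not an official OODT example; the expression and the expected
# output below are assumptions based only on the grammar handled above).
def _parse_example():
    q = Query('targetName = Mars and RETURN = instrumentId')
    wheres = [(e.role, e.value) for e in q.wheres]
    # expected postfix constraint stack:
    # [('elemName', 'targetName'), ('LITERAL', 'Mars'), ('RELOP', 'EQ')]
    selects = [(e.role, e.value) for e in q.selects]
    # expected select list: [('elemName', 'instrumentId')]
    return wheres, selects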
# Sample code:
# if __name__ == '__main__':
# import urllib, xml.dom.minidom
#
# impl = xml.dom.minidom.getDOMImplementation()
# doc = impl.createDocument(None, None, None) # nsURI, qName, docType
#
# q = Query('track = Innocente')
# node = q.toXML(doc)
# doc.appendChild(node)
# q = doc.toxml()
# f = urllib.urlopen('http://localhost:8080/pds/prof', urllib.urlencode(dict(xmlq=q)), {}) # url, postdata, proxies (none)
# print f.read()
| apache-2.0 |
almeidapaulopt/frappe | frappe/desk/doctype/todo/todo.py | 13 | 2986 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.model.document import Document
from frappe.utils import get_fullname
subject_field = "description"
sender_field = "sender"
exclude_from_linked_with = True
class ToDo(Document):
def validate(self):
self._assignment = None
if self.is_new():
self._assignment = {
"text": frappe._("Assigned to {0}: {1}").format(get_fullname(self.owner), self.description),
"comment_type": "Assigned"
}
else:
# NOTE the previous value is only available in validate method
if self.get_db_value("status") != self.status:
self._assignment = {
"text": frappe._("Assignment closed by {0}".format(get_fullname(frappe.session.user))),
"comment_type": "Assignment Completed"
}
def on_update(self):
if self._assignment:
self.add_assign_comment(**self._assignment)
self.update_in_reference()
def on_trash(self):
# unlink todo from linked comments
frappe.db.sql("""update `tabCommunication` set link_doctype=null, link_name=null
where link_doctype=%(doctype)s and link_name=%(name)s""", {"doctype": self.doctype, "name": self.name})
self.update_in_reference()
def add_assign_comment(self, text, comment_type):
if not (self.reference_type and self.reference_name):
return
frappe.get_doc(self.reference_type, self.reference_name).add_comment(comment_type, text)
def update_in_reference(self):
if not (self.reference_type and self.reference_name):
return
try:
assignments = [d[0] for d in frappe.get_all("ToDo",
filters={
"reference_type": self.reference_type,
"reference_name": self.reference_name,
"status": "Open"
},
fields=["owner"], as_list=True)]
assignments.reverse()
frappe.db.set_value(self.reference_type, self.reference_name,
"_assign", json.dumps(assignments), update_modified=False)
except Exception as e:
if e.args[0] == 1146 and frappe.flags.in_install:
# no table
return
elif e.args[0]==1054:
from frappe.model.db_schema import add_column
add_column(self.reference_type, "_assign", "Text")
self.update_in_reference()
else:
raise
# NOTE: todo is viewable if either owner or assigned_to or System Manager in roles
def on_doctype_update():
frappe.db.add_index("ToDo", ["reference_type", "reference_name"])
def get_permission_query_conditions(user):
if not user: user = frappe.session.user
if "System Manager" in frappe.get_roles(user):
return None
else:
return """(tabToDo.owner = '{user}' or tabToDo.assigned_by = '{user}')"""\
.format(user=frappe.db.escape(user))
def has_permission(doc, user):
if "System Manager" in frappe.get_roles(user):
return True
else:
return doc.owner==user or doc.assigned_by==user
@frappe.whitelist()
def new_todo(description):
frappe.get_doc({
'doctype': 'ToDo',
'description': description
}).insert() | mit |
sgerhart/ansible | test/runner/lib/cloud/opennebula.py | 34 | 1609 | """OpenNebula plugin for integration tests."""
from lib.cloud import (
CloudProvider,
CloudEnvironment
)
from lib.util import (
display,
)
class OpenNebulaCloudProvider(CloudProvider):
"""Checks if a configuration file has been passed or fixtures are going to be used for testing"""
def filter(self, targets, exclude):
""" no need to filter modules, they can either run from config file or from fixtures"""
pass
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(OpenNebulaCloudProvider, self).setup()
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self):
display.info('No config file provided, will run test from fixtures')
config = self._read_config_template()
values = dict(
URL="http://localhost/RPC2",
USERNAME='oneadmin',
PASSWORD='onepass',
FIXTURES='true',
REPLAY='true',
)
config = self._populate_config_template(config, values)
self._write_config(config)
class OpenNebulaCloudEnvironment(CloudEnvironment):
"""
    Updates integration test environment after delegation. Will set up the config file as a parameter.
"""
def configure_environment(self, env, cmd):
"""
:type env: dict[str, str]
:type cmd: list[str]
"""
cmd.append('-e')
cmd.append('@%s' % self.config_path)
cmd.append('-e')
cmd.append('resource_prefix=%s' % self.resource_prefix)
| mit |
zhongyi-zhang/azure-quickstart-templates | splunk-on-ubuntu/scripts/dobackup.py | 119 | 3256 | #!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2016 Microsoft. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Back up a tar-ball to Azure blob storage.
Usage:
python dobackup.py <path/file.tar>
"""
from azure.storage import CloudStorageAccount
import config, time, argparse
def _get_parameters():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="path+name of tar-ball to send to blob storage")
args = parser.parse_args()
input_file = args.input_file
return input_file
def _get_service():
account_name = config.STORAGE_ACCOUNT_NAME
account_key = config.STORAGE_ACCOUNT_KEY
account = CloudStorageAccount(account_name = account_name, account_key = account_key)
service = account.create_block_blob_service()
return service
# The last time a backup was dropped into the folder, it was named 'splunketccfg.tar'.
# This time, I rename that file to have a datetime stamp on the end of it.
# And then I copy the new backup to 'splunketccfg.tar'.
# This way, the newest backup is always 'splunketccfg.tar'. Easier to find when it's time to restore.
# The edge case is the first time backup is run. So I check for existence before trying to copy.
def _store_tarball(service, input_file):
trg_container_name = 'backups'
stacked_blob_name = 'splunketccfg_' + time.strftime('%m%d%YT%H%M%S') + '.tar'
newest_blob_name = 'splunketccfg.tar'
exists = service.exists(trg_container_name, newest_blob_name)
if exists:
source = service.make_blob_url(trg_container_name, newest_blob_name)
service.copy_blob(trg_container_name, stacked_blob_name, source)
service.create_blob_from_path(trg_container_name, newest_blob_name, input_file)
def main():
input_file = _get_parameters()
service = _get_service()
_store_tarball(service, input_file)
if __name__ == '__main__':
main()
| mit |
padipadou/CADL | session-5/libs/dataset_utils.py | 5 | 14402 | """Utils for dataset creation.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import os
import pickle
import numpy as np
import tensorflow as tf
from . import dft
from .utils import download_and_extract_tar
def create_input_pipeline(files, batch_size, n_epochs, shape, crop_shape=None,
crop_factor=1.0, n_threads=2):
"""Creates a pipefile from a list of image files.
Includes batch generator/central crop/resizing options.
The resulting generator will dequeue the images batch_size at a time until
it throws tf.errors.OutOfRangeError when there are no more images left in
the queue.
Parameters
----------
files : list
List of paths to image files.
batch_size : int
Number of image files to load at a time.
n_epochs : int
Number of epochs to run before raising tf.errors.OutOfRangeError
shape : list
[height, width, channels]
crop_shape : list
[height, width] to crop image to.
crop_factor : float
Percentage of image to take starting from center.
n_threads : int, optional
Number of threads to use for batch shuffling
"""
# We first create a "producer" queue. It creates a production line which
# will queue up the file names and allow another queue to deque the file
# names all using a tf queue runner.
# Put simply, this is the entry point of the computational graph.
# It will generate the list of file names.
# We also specify it's capacity beforehand.
producer = tf.train.string_input_producer(
files, capacity=len(files))
# We need something which can open the files and read its contents.
reader = tf.WholeFileReader()
# We pass the filenames to this object which can read the file's contents.
# This will create another queue running which dequeues the previous queue.
keys, vals = reader.read(producer)
# And then have to decode its contents as we know it is a jpeg image
imgs = tf.image.decode_jpeg(
vals,
channels=3 if len(shape) > 2 and shape[2] == 3 else 0)
# We have to explicitly define the shape of the tensor.
# This is because the decode_jpeg operation is still a node in the graph
# and doesn't yet know the shape of the image. Future operations however
# need explicit knowledge of the image's shape in order to be created.
imgs.set_shape(shape)
    # Next we'll resize and centrally crop the image to crop_shape (if given).
    # This operation requires explicit knowledge of the image's shape.
if shape[0] > shape[1]:
rsz_shape = [int(shape[0] / shape[1] * crop_shape[0] / crop_factor),
int(crop_shape[1] / crop_factor)]
else:
rsz_shape = [int(crop_shape[0] / crop_factor),
int(shape[1] / shape[0] * crop_shape[1] / crop_factor)]
rszs = tf.image.resize_images(imgs, rsz_shape)
crops = (tf.image.resize_image_with_crop_or_pad(
rszs, crop_shape[0], crop_shape[1])
if crop_shape is not None
else imgs)
# Now we'll create a batch generator that will also shuffle our examples.
# We tell it how many it should have in its buffer when it randomly
# permutes the order.
min_after_dequeue = len(files) // 100
# The capacity should be larger than min_after_dequeue, and determines how
# many examples are prefetched. TF docs recommend setting this value to:
# min_after_dequeue + (num_threads + a small safety margin) * batch_size
capacity = min_after_dequeue + (n_threads + 1) * batch_size
# Randomize the order and output batches of batch_size.
batch = tf.train.shuffle_batch([crops],
enqueue_many=False,
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
num_threads=n_threads)
# alternatively, we could use shuffle_batch_join to use multiple reader
# instances, or set shuffle_batch's n_threads to higher than 1.
return batch
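# Added consumption sketch (not part of the original module): the file list, the
# 218x178 source shape and the queue-runner boilerplate below are assumptions made
# for illustration.
def _input_pipeline_example(files):
    """Fetch a single batch from the pipeline defined above."""
    batch = create_input_pipeline(files, batch_size=8, n_epochs=1,
                                  shape=[218, 178, 3], crop_shape=[64, 64],
                                  crop_factor=0.8)
    sess = tf.Session()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    imgs = sess.run(batch)  # one [8, 64, 64, 3] float32 batch
    coord.request_stop()
    coord.join(threads)
    sess.close()
    return imgs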
def gtzan_music_speech_download(dst='gtzan_music_speech'):
"""Download the GTZAN music and speech dataset.
Parameters
----------
dst : str, optional
Location to put the GTZAN music and speech datset.
"""
path = 'http://opihi.cs.uvic.ca/sound/music_speech.tar.gz'
download_and_extract_tar(path, dst)
def gtzan_music_speech_load(dst='gtzan_music_speech'):
"""Load the GTZAN Music and Speech dataset.
Downloads the dataset if it does not exist into the dst directory.
Parameters
----------
dst : str, optional
Location of GTZAN Music and Speech dataset.
Returns
-------
Xs, ys : np.ndarray, np.ndarray
Array of data, Array of labels
"""
from scipy.io import wavfile
if not os.path.exists(dst):
gtzan_music_speech_download(dst)
music_dir = os.path.join(os.path.join(dst, 'music_speech'), 'music_wav')
music = [os.path.join(music_dir, file_i)
for file_i in os.listdir(music_dir)
if file_i.endswith('.wav')]
speech_dir = os.path.join(os.path.join(dst, 'music_speech'), 'speech_wav')
speech = [os.path.join(speech_dir, file_i)
for file_i in os.listdir(speech_dir)
if file_i.endswith('.wav')]
Xs = []
ys = []
for i in music:
sr, s = wavfile.read(i)
s = s / 16384.0 - 1.0
re, im = dft.dft_np(s)
mag, phs = dft.ztoc(re, im)
Xs.append((mag, phs))
ys.append(0)
for i in speech:
sr, s = wavfile.read(i)
s = s / 16384.0 - 1.0
re, im = dft.dft_np(s)
mag, phs = dft.ztoc(re, im)
Xs.append((mag, phs))
ys.append(1)
Xs = np.array(Xs)
Xs = np.transpose(Xs, [0, 2, 3, 1])
ys = np.array(ys)
return Xs, ys
def cifar10_download(dst='cifar10'):
"""Download the CIFAR10 dataset.
Parameters
----------
dst : str, optional
Directory to download into.
"""
path = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
download_and_extract_tar(path, dst)
def cifar10_load(dst='cifar10'):
"""Load the CIFAR10 dataset.
Downloads the dataset if it does not exist into the dst directory.
Parameters
----------
dst : str, optional
Location of CIFAR10 dataset.
Returns
-------
Xs, ys : np.ndarray, np.ndarray
Array of data, Array of labels
"""
if not os.path.exists(dst):
cifar10_download(dst)
Xs = None
ys = None
for f in range(1, 6):
cf = pickle.load(open(
'%s/cifar-10-batches-py/data_batch_%d' % (dst, f), 'rb'),
encoding='LATIN')
if Xs is not None:
Xs = np.r_[Xs, cf['data']]
ys = np.r_[ys, np.array(cf['labels'])]
else:
Xs = cf['data']
ys = cf['labels']
Xs = np.swapaxes(np.swapaxes(Xs.reshape(-1, 3, 32, 32), 1, 3), 1, 2)
return Xs, ys
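# Added usage sketch (not part of the original module): the first call downloads the
# CIFAR-10 archive into dst; the shapes noted below follow from the code above.
def _cifar10_example(dst='cifar10'):
    Xs, ys = cifar10_load(dst)
    # Xs: (50000, 32, 32, 3) uint8 images, ys: 50000 integer labels in [0, 9]
    return Xs.shape, len(np.unique(ys))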
def dense_to_one_hot(labels, n_classes=2):
"""Convert class labels from scalars to one-hot vectors.
Parameters
----------
labels : array
Input labels to convert to one-hot representation.
n_classes : int, optional
Number of possible one-hot.
Returns
-------
one_hot : array
One hot representation of input.
"""
return np.eye(n_classes).astype(np.float32)[labels]
class DatasetSplit(object):
"""Utility class for batching data and handling multiple splits.
Attributes
----------
current_batch_idx : int
        Index into the current epoch, advanced by the batch generator.
images : np.ndarray
Xs of the dataset. Not necessarily images.
labels : np.ndarray
ys of the dataset.
n_labels : int
Number of possible labels
num_examples : int
Number of total observations
"""
def __init__(self, images, labels):
"""Initialize a DatasetSplit object.
Parameters
----------
images : np.ndarray
Xs/inputs
labels : np.ndarray
ys/outputs
"""
self.images = np.array(images).astype(np.float32)
if labels is not None:
self.labels = np.array(labels).astype(np.int32)
self.n_labels = len(np.unique(labels))
else:
self.labels = None
self.num_examples = len(self.images)
def next_batch(self, batch_size=100):
"""Batch generator with randomization.
Parameters
----------
batch_size : int, optional
Size of each minibatch.
Returns
-------
Xs, ys : np.ndarray, np.ndarray
Next batch of inputs and labels (if no labels, then None).
"""
# Shuffle each epoch
current_permutation = np.random.permutation(range(len(self.images)))
epoch_images = self.images[current_permutation, ...]
if self.labels is not None:
epoch_labels = self.labels[current_permutation, ...]
# Then iterate over the epoch
self.current_batch_idx = 0
while self.current_batch_idx < len(self.images):
end_idx = min(
self.current_batch_idx + batch_size, len(self.images))
this_batch = {
'images': epoch_images[self.current_batch_idx:end_idx],
'labels': epoch_labels[self.current_batch_idx:end_idx]
if self.labels is not None else None
}
self.current_batch_idx += batch_size
yield this_batch['images'], this_batch['labels']
class Dataset(object):
"""Create a dataset from data and their labels.
Allows easy use of train/valid/test splits; Batch generator.
Attributes
----------
all_idxs : list
All indexes across all splits.
all_inputs : list
All inputs across all splits.
all_labels : list
All labels across all splits.
n_labels : int
Number of labels.
split : list
Percentage split of train, valid, test sets.
test_idxs : list
Indexes of the test split.
train_idxs : list
Indexes of the train split.
valid_idxs : list
Indexes of the valid split.
"""
def __init__(self, Xs, ys=None, split=[1.0, 0.0, 0.0], one_hot=False):
"""Initialize a Dataset object.
Parameters
----------
Xs : np.ndarray
Images/inputs to a network
ys : np.ndarray
Labels/outputs to a network
split : list, optional
Percentage of train, valid, and test sets.
one_hot : bool, optional
Whether or not to use one-hot encoding of labels (ys).
"""
self.all_idxs = []
self.all_labels = []
self.all_inputs = []
self.train_idxs = []
self.valid_idxs = []
self.test_idxs = []
self.n_labels = 0
self.split = split
# Now mix all the labels that are currently stored as blocks
self.all_inputs = Xs
n_idxs = len(self.all_inputs)
idxs = range(n_idxs)
rand_idxs = np.random.permutation(idxs)
self.all_inputs = self.all_inputs[rand_idxs, ...]
if ys is not None:
self.all_labels = ys if not one_hot else dense_to_one_hot(ys)
self.all_labels = self.all_labels[rand_idxs, ...]
else:
self.all_labels = None
# Get splits
self.train_idxs = idxs[:round(split[0] * n_idxs)]
self.valid_idxs = idxs[len(self.train_idxs):
len(self.train_idxs) + round(split[1] * n_idxs)]
self.test_idxs = idxs[
(len(self.valid_idxs) + len(self.train_idxs)):
(len(self.valid_idxs) + len(self.train_idxs)) +
round(split[2] * n_idxs)]
@property
def X(self):
"""Inputs/Xs/Images.
Returns
-------
all_inputs : np.ndarray
Original Inputs/Xs.
"""
return self.all_inputs
@property
def Y(self):
"""Outputs/ys/Labels.
Returns
-------
all_labels : np.ndarray
Original Outputs/ys.
"""
return self.all_labels
@property
def train(self):
"""Train split.
Returns
-------
split : DatasetSplit
Split of the train dataset.
"""
if len(self.train_idxs):
inputs = self.all_inputs[self.train_idxs, ...]
if self.all_labels is not None:
labels = self.all_labels[self.train_idxs, ...]
else:
labels = None
else:
inputs, labels = [], []
return DatasetSplit(inputs, labels)
@property
def valid(self):
"""Validation split.
Returns
-------
split : DatasetSplit
Split of the validation dataset.
"""
if len(self.valid_idxs):
inputs = self.all_inputs[self.valid_idxs, ...]
if self.all_labels is not None:
labels = self.all_labels[self.valid_idxs, ...]
else:
labels = None
else:
inputs, labels = [], []
return DatasetSplit(inputs, labels)
@property
def test(self):
"""Test split.
Returns
-------
split : DatasetSplit
Split of the test dataset.
"""
if len(self.test_idxs):
inputs = self.all_inputs[self.test_idxs, ...]
if self.all_labels is not None:
labels = self.all_labels[self.test_idxs, ...]
else:
labels = None
else:
inputs, labels = [], []
return DatasetSplit(inputs, labels)
def mean(self):
"""Mean of the inputs/Xs.
Returns
-------
mean : np.ndarray
Calculates mean across 0th (batch) dimension.
"""
return np.mean(self.all_inputs, axis=0)
def std(self):
"""Standard deviation of the inputs/Xs.
Returns
-------
std : np.ndarray
Calculates std across 0th (batch) dimension.
"""
return np.std(self.all_inputs, axis=0)
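# Added usage sketch (not part of the original module): the synthetic shapes and the
# binary labels below are assumptions made for illustration only.
def _dataset_example():
    Xs = np.random.rand(100, 32, 32, 3).astype(np.float32)
    ys = np.random.randint(0, 2, 100)
    ds = Dataset(Xs, ys, split=[0.8, 0.1, 0.1], one_hot=True)
    for X_batch, y_batch in ds.train.next_batch(batch_size=20):
        # X_batch: (20, 32, 32, 3), y_batch: (20, 2) one-hot labels
        break
    return ds.mean().shape  # per-pixel mean over all examples: (32, 32, 3)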
| apache-2.0 |
shashi28/nuts | FireBolt/firewall.py | 1 | 1979 | import platform
import pydivert
from pydivert.windivert import *
from pydivert.winutils import *
from pydivert.enum import *
from pydivert.models import *
from pydivert.decorators import *
from PyQt4.QtCore import *
import impacket
from impacket.ImpactDecoder import EthDecoder
version = '1.0'
class Bolt(QThread):
def __init__(self,parent = None):
super(Bolt,self).__init__(parent)
self.block = True
driver_dir = os.path.join(os.path.realpath(os.curdir), "lib", version)
if platform.architecture()[0] == "32bit":
driver_dir = os.path.join(driver_dir, "x86")
else:
driver_dir = os.path.join(driver_dir, "amd64")
os.chdir(driver_dir)
reg_key = r"SYSTEM\CurrentControlSet\Services\WinDivert" + version
dll_path = os.path.join(driver_dir, "WinDivert.dll")
self.dev = WinDivert(dll_path)
self.dev.register()
self.decoder = EthDecoder()
def drop(self):
with Handle(filter=self.filter,layer=Layer.NETWORK,priority=0,flags=0) as handle:
while self.block:
rawdata = handle.recv()
self.pkt = self.dev.parse_packet(rawdata)
protocol = self.calcProtocol()
self.emit(SIGNAL('tableinput(QString,QString,QString,QString,QString,QString)'),str(self.pkt.src_addr),str(self.pkt.dst_addr),str(protocol),str(self.pkt.src_port),str(self.pkt.dst_port),str(self.pkt))
def calcProtocol(self):
if self.pkt.ipv4_hdr is not None:
if self.pkt.ipv4_hdr.Protocol == 1:
return 'icmp'
elif self.pkt.ipv4_hdr.Protocol == 6:
return 'tcp'
elif self.pkt.ipv4_hdr.Protocol == 17:
return 'udp'
def run(self):
self.drop()
self.exec_()
def setFilter(self,filtr):
self.filter = str(filtr)
self.block = True
def handle_slot_stop(self):
self.block = False | mit |
emreg00/biana | biana/BianaObjects/output_utilities.py | 2 | 2098 |
def get_html_table_header(columns, attributes):
attributes_str = " ".join([ "%s=\"%s\"" %(x[0],x[1]) for x in attributes ])
th_str = "<tr>%s</tr>" %"".join([ "<th>%s</th>" %x for x in columns ])
return "<table %s>%s" %(attributes_str,th_str)
#return "<table id=\"biana\" %s>%s" %(attributes_str,th_str)
def get_html_table_foot():
return "</table>\n"
def append_html_table_values(values,rowIDs=None,omit_special_chars=True):
if rowIDs is None:
rowIDs = [ x for x in xrange(len(values)) ]
if omit_special_chars:
data_str = "".join([ "<tr rowID=\"%s\">%s</tr>" %(rowIDs[current_row_index],"".join([ "<td>%s</td>" %str(current_column).replace("&","").replace("<","").replace(">","") for current_column in values[current_row_index] ])) for current_row_index in xrange(len(values)) ])
else:
data_str = "".join([ "<tr rowID=\"%s\">%s</tr>" %(rowIDs[current_row_index],"".join([ "<td>%s</td>" % str(current_column) for current_column in values[current_row_index] ])) for current_row_index in xrange(len(values)) ])
return data_str
def get_html_table(columns,values,rowIDs=None,attributes=[],title=""):
"""
"columns" must be a list with column headers
"""
if rowIDs is None:
rowIDs = [ x for x in xrange(len(values)) ]
attributes_str = " ".join([ "%s=\"%s\"" %(x[0],x[1]) for x in attributes ])
th_str = "<tr>%s</tr>" %"".join([ "<th>%s</th>" %x for x in columns ])
data_str = "".join([ "<tr rowID=\"%s\">%s</tr>" %(rowIDs[current_row_index],"".join([ "<td>%s</td>" %str(current_column).replace("&","").replace("<","").replace(">","") for current_column in values[current_row_index] ])) for current_row_index in xrange(len(values)) ])
return "<table %s>%s%s</table>\n" %(attributes_str,th_str,data_str)
def get_tabulated_table(values,columns=None):
"""
"""
if columns is not None:
#to_print = ["\t".join(columns)]
to_print = [list(columns)]
else:
to_print = []
to_print.extend(values)
return "%s\n" %"\n".join( [ "\t".join(map(str,x)) for x in to_print ] )
| gpl-3.0 |
square/pants | tests/python/pants_test/tasks/test_scrooge_gen.py | 2 | 4319 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
from textwrap import dedent
from twitter.common.collections import OrderedSet
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.tasks.scrooge_gen import ScroogeGen
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.address import SyntheticAddress
from pants.base.build_environment import get_buildroot
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.exceptions import TaskError
from pants.goal.context import Context
from pants.util.dirutil import safe_rmtree
from pants_test.tasks.test_base import TaskTest
import pytest
from mock import MagicMock, patch
# TODO (tdesai) Issue-240: Use JvmToolTaskTestBase for ScroogeGenTest
class ScroogeGenTest(TaskTest):
@classmethod
def task_type(cls):
return ScroogeGen
@property
def alias_groups(self):
return BuildFileAliases.create(targets={'java_thrift_library': JavaThriftLibrary})
def setUp(self):
super(ScroogeGenTest, self).setUp()
self.task_outdir = os.path.join(self.build_root, 'scrooge', 'gen-java')
def tearDown(self):
super(ScroogeGenTest, self).tearDown()
safe_rmtree(self.task_outdir)
def test_validate(self):
defaults = JavaThriftLibrary.Defaults()
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='one',
sources=[],
dependencies=[],
)
'''))
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='two',
sources=[],
dependencies=[':one'],
)
'''))
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='three',
sources=[],
dependencies=[':one'],
rpc_style='finagle',
)
'''))
ScroogeGen._validate(defaults, [self.target('test_validate:one')])
ScroogeGen._validate(defaults, [self.target('test_validate:two')])
with pytest.raises(TaskError):
ScroogeGen._validate(defaults, [self.target('test_validate:three')])
def test_smoke(self):
contents = dedent('''namespace java com.pants.example
struct Example {
1: optional i64 number
}
''')
self.create_file(relpath='test_smoke/a.thrift', contents=contents)
self.add_to_build_file('test_smoke', dedent('''
java_thrift_library(name='a',
sources=['a.thrift'],
dependencies=[],
compiler='scrooge',
language='scala',
rpc_style='finagle'
)
'''))
target = self.target('test_smoke:a')
task = self.prepare_task(build_graph=self.build_graph,
targets=[target],
build_file_parser=self.build_file_parser)
with patch('pants.backend.codegen.tasks.scrooge_gen.calculate_services'):
task._outdir = MagicMock()
task._outdir.return_value = self.task_outdir
task.gen = MagicMock()
sources = [os.path.join(self.task_outdir, 'com/pants/example/Example.scala')]
task.gen.return_value = {'test_smoke/a.thrift': sources}
saved_add_new_target = Context.add_new_target
try:
Context.add_new_target = MagicMock()
task.execute()
relative_task_outdir = os.path.relpath(self.task_outdir, get_buildroot())
spec = '{spec_path}:{name}'.format(spec_path=relative_task_outdir, name='test_smoke.a')
address = SyntheticAddress.parse(spec=spec)
Context.add_new_target.assert_called_once_with(address,
ScalaLibrary,
sources=sources,
excludes=OrderedSet(),
dependencies=OrderedSet(),
provides=None,
derived_from=target)
finally:
Context.add_new_target = saved_add_new_target
| apache-2.0 |
cloudwatt/contrail-controller | src/analytics/contrail-snmp-collector/contrail_snmp_collector/snmpcfg.py | 2 | 8138 | #
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
import argparse, os, ConfigParser, sys, re
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from device_config import DeviceConfig
import discoveryclient.client as client
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, \
API_SERVER_DISCOVERY_SERVICE_NAME
class CfgParser(object):
CONF_DEFAULT_PATH = '/etc/contrail/contrail-snmp-collector.conf'
def __init__(self, argv):
self._devices = []
self._args = None
self.__pat = None
self._argv = argv or ' '.join(sys.argv[1:])
def parse(self):
'''
command line example
        contrail-snmp-collector --log_level SYS_DEBUG
--logging_level DEBUG
--log_category test
--log_file <stdout>
--use_syslog
--syslog_facility LOG_USER
--disc_server_ip 127.0.0.1
--disc_server_port 5998
                      --conf_file /etc/contrail/contrail-snmp-collector.conf
conf file example:
[DEFAULTS]
log_local = 0
log_level = SYS_DEBUG
log_category =
        log_file = /var/log/contrail/contrail-snmp-collector.log
file = /etc/contrail/snmp-dev.ini
/etc/contrail/snmp-dev.ini example:
#snmp version 1 or 2
[1.1.1.190]
Community = public
Version = 2
#snmp version 3
[1.1.1.191]
Version = 3
SecLevel = authPriv
AuthProto = SHA
AuthPass = foo
PrivProto = AES
PrivPass = foo
SecName = snmpuser
# Mibs default to all, to get a subset
Mibs = LldpTable, ArpTable
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
kwargs = {'help': "Specify config file", 'metavar':"FILE"}
if os.path.exists(self.CONF_DEFAULT_PATH):
kwargs['default'] = self.CONF_DEFAULT_PATH
conf_parser.add_argument("-c", "--conf_file", **kwargs)
args, remaining_argv = conf_parser.parse_known_args(self._argv.split())
defaults = {
'collectors' : ['127.0.0.1:8086'],
'log_local' : False,
'log_level' : SandeshLevel.SYS_DEBUG,
'log_category' : '',
'log_file' : Sandesh._DEFAULT_LOG_FILE,
'use_syslog' : False,
'syslog_facility' : Sandesh._DEFAULT_SYSLOG_FACILITY,
'scan_frequency' : 600,
'http_server_port': 5920,
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'default-domain'
}
disc_opts = {
'disc_server_ip' : None,
'disc_server_port' : 5998,
}
config = None
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.optionxform = str
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
if 'DISCOVERY' in config.sections():
disc_opts.update(dict(config.items('DISCOVERY')))
# Override with CLI options
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(ksopts)
defaults.update(disc_opts)
parser.set_defaults(**defaults)
parser.add_argument("--collectors",
help="List of Collector IP addresses in ip:port format",
nargs="+")
parser.add_argument(
"--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument("--use_syslog",
action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--scan_frequency", type=int,
help="Time between snmp poll")
parser.add_argument("--http_server_port", type=int,
help="introspect server port")
parser.add_argument("--admin_user",
help="Name of keystone admin user")
parser.add_argument("--admin_password",
help="Password of keystone admin user")
parser.add_argument("--admin_tenant_name",
help="Tenant name for keystone admin user")
#parser.add_argument("--discovery_server",
# help="ip:port of discovery server")
parser.add_argument("--disc_server_ip",
help="Discovery Server IP address")
parser.add_argument("--disc_server_port", type=int,
help="Discovery Server port")
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("--device-config-file",
help="where to look for snmp credentials")
group.add_argument("--api_server",
help="ip:port of api-server for snmp credentials")
self._args = parser.parse_args(remaining_argv)
if type(self._args.collectors) is str:
self._args.collectors = self._args.collectors.split()
self._args.config_sections = config
self._disc = None
def devices(self):
if self._args.device_config_file:
self._devices = DeviceConfig.fom_file(
self._args.device_config_file)
elif self._args.api_server:
self._devices = DeviceConfig.fom_api_server(
self._args.api_server,
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name)
elif self._args.disc_server_port:
try:
self._devices = DeviceConfig.fom_api_server(
self.get_api_svr(), self._args.admin_user,
self._args.admin_password, self._args.admin_tenant_name)
except Exception as e:
self._devices = []
for d in self._devices:
yield d
def get_api_svr(self):
if self._disc is None:
self._disc = client.DiscoveryClient(*self.discovery_params())
a = self._disc.subscribe(API_SERVER_DISCOVERY_SERVICE_NAME, 0)
d = a.read()
return d[-1]['ip-address'] + ':' + d[-1]['port']
def discovery_params(self):
if self._args.disc_server_ip:
ip, port = self._args.disc_server_ip, \
self._args.disc_server_port
else:
ip, port = '127.0.0.1', self._args.disc_server_port
return ip, port, ModuleNames[Module.CONTRAIL_SNMP_COLLECTOR]
def collectors(self):
return self._args.collectors
def log_local(self):
return self._args.log_local
def log_category(self):
return self._args.log_category
def log_level(self):
return self._args.log_level
def log_file(self):
return self._args.log_file
def use_syslog(self):
return self._args.use_syslog
def syslog_facility(self):
return self._args.syslog_facility
def frequency(self):
return self._args.scan_frequency
def http_port(self):
return self._args.http_server_port
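# A minimal usage sketch, assuming a caller such as the collector's main(); the
# poll_device() helper below is hypothetical and only illustrates the intended flow.
#
#   cfg = CfgParser('--log_level SYS_DEBUG --scan_frequency 300')
#   cfg.parse()
#   print cfg.collectors()          # e.g. ['127.0.0.1:8086']
#   for device in cfg.devices():    # DeviceConfig objects from file, api-server or discovery
#       poll_device(device)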
| apache-2.0 |
muffl0n/ansible | test/units/parsing/test_data_loader.py | 58 | 3231 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import PY3
from yaml.scanner import ScannerError
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, mock_open
from ansible.errors import AnsibleParserError
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.yaml.objects import AnsibleMapping
class TestDataLoader(unittest.TestCase):
def setUp(self):
self._loader = DataLoader()
def tearDown(self):
pass
@patch.object(DataLoader, '_get_file_contents')
def test_parse_json_from_file(self, mock_def):
mock_def.return_value = ("""{"a": 1, "b": 2, "c": 3}""", True)
output = self._loader.load_from_file('dummy_json.txt')
self.assertEqual(output, dict(a=1,b=2,c=3))
@patch.object(DataLoader, '_get_file_contents')
def test_parse_yaml_from_file(self, mock_def):
mock_def.return_value = ("""
a: 1
b: 2
c: 3
""", True)
output = self._loader.load_from_file('dummy_yaml.txt')
self.assertEqual(output, dict(a=1,b=2,c=3))
@patch.object(DataLoader, '_get_file_contents')
def test_parse_fail_from_file(self, mock_def):
mock_def.return_value = ("""
TEXT:
***
NOT VALID
""", True)
self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt')
class TestDataLoaderWithVault(unittest.TestCase):
def setUp(self):
self._loader = DataLoader()
self._loader.set_vault_password('ansible')
def tearDown(self):
pass
@patch.multiple(DataLoader, path_exists=lambda s, x: True, is_file=lambda s, x: True)
def test_parse_from_vault_1_1_file(self):
vaulted_data = """$ANSIBLE_VAULT;1.1;AES256
33343734386261666161626433386662623039356366656637303939306563376130623138626165
6436333766346533353463636566313332623130383662340a393835656134633665333861393331
37666233346464636263636530626332623035633135363732623332313534306438393366323966
3135306561356164310a343937653834643433343734653137383339323330626437313562306630
3035
"""
if PY3:
builtins_name = 'builtins'
else:
builtins_name = '__builtin__'
with patch(builtins_name + '.open', mock_open(read_data=vaulted_data)):
output = self._loader.load_from_file('dummy_vault.txt')
self.assertEqual(output, dict(foo='bar'))
| gpl-3.0 |
cherrygirl/micronaet7 | price_quotation_history/wizard/__init__.py | 1 | 1072 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import compare
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
darrenbilby/grr | client/client_actions/windows/windows.py | 6 | 15608 | #!/usr/bin/env python
"""Windows specific actions.
Most of these actions share an interface (in/out rdfvalues) with linux actions
of the same name. Windows-only actions are registered with the server via
libs/server_stubs.py
"""
import binascii
import ctypes
import exceptions
import logging
import os
import tempfile
import _winreg
import pythoncom
import pywintypes
import win32api
import win32com.client
import win32file
import win32service
import win32serviceutil
import winerror
import wmi
from grr.client import actions
from grr.client.client_actions import standard
from grr.lib import config_lib
from grr.lib import constants
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import protodict as rdf_protodict
# Properties to remove from results sent to the server.
# These properties are included with nearly every WMI object and use space.
IGNORE_PROPS = ["CSCreationClassName", "CreationClassName", "OSName",
"OSCreationClassName", "WindowsVersion", "CSName"]
DRIVER_MAX_SIZE = 1024 * 1024 * 20 # 20MB
def UnicodeFromCodePage(string):
"""Attempt to coerce string into a unicode object."""
# get the current code page
codepage = ctypes.windll.kernel32.GetOEMCP()
try:
return string.decode("cp%s" % codepage)
except UnicodeError:
try:
return string.decode("utf16", "ignore")
except UnicodeError:
# Fall back on utf8 but ignore errors
return string.decode("utf8", "ignore")
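# Illustrative only: any byte string obtained from the Win32 API (a volume label,
# a user name, ...) can be coerced with UnicodeFromCodePage(raw_bytes); decoding is
# attempted with the current OEM code page first, then UTF-16, then UTF-8 (errors ignored).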
class GetInstallDate(actions.ActionPlugin):
"""Estimate the install date of this system."""
out_rdfvalue = rdf_protodict.DataBlob
def Run(self, unused_args):
"""Estimate the install date of this system."""
# Don't use _winreg.KEY_WOW64_64KEY since it breaks on Windows 2000
subkey = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
"Software\\Microsoft\\Windows NT\\CurrentVersion",
0, _winreg.KEY_READ)
install_date = _winreg.QueryValueEx(subkey, "InstallDate")
self.SendReply(integer=install_date[0])
class EnumerateUsers(actions.ActionPlugin):
"""Enumerates all the users on this system."""
out_rdfvalue = rdf_client.User
def GetUsersAndHomeDirs(self):
"""Gets the home directory from the registry for all users on the system.
Returns:
A list of tuples containing (username, sid, homedirectory) for each user.
"""
profiles_key = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList"
results = []
try:
# Don't use _winreg.KEY_WOW64_64KEY since it breaks on Windows 2000
user_key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE, profiles_key, 0,
_winreg.KEY_ENUMERATE_SUB_KEYS)
try:
index = 0
while True:
sid = _winreg.EnumKey(user_key, index)
index += 1
# Don't use _winreg.KEY_WOW64_64KEY since it breaks on Windows 2000
homedir_key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE, profiles_key + "\\" + sid, 0,
_winreg.KEY_QUERY_VALUE)
(homedir, _) = _winreg.QueryValueEx(homedir_key, "ProfileImagePath")
username = os.path.basename(homedir)
results.append((username, sid, homedir))
_winreg.CloseKey(homedir_key)
except exceptions.WindowsError:
# No more values.
pass
_winreg.CloseKey(user_key)
except exceptions.WindowsError:
logging.error("Could not enumerate users.")
return results
def GetSpecialFolders(self, sid):
"""Retrieves all the special folders from the registry."""
folders_key = (r"%s\Software\Microsoft\Windows"
r"\CurrentVersion\Explorer\Shell Folders")
try:
key = _winreg.OpenKey(_winreg.HKEY_USERS, folders_key % sid)
except exceptions.WindowsError:
# For users that are not logged in, this key will not exist. If we return
# None here, the special folders will be guessed for now.
return
response = {}
for (reg_key, _, pb_field) in self.special_folders:
try:
(folder, _) = _winreg.QueryValueEx(key, reg_key)
if folder:
response[pb_field] = folder
except exceptions.WindowsError:
pass
return rdf_client.FolderInformation(**response)
def GetWMIAccount(self, result, sid, homedir, known_sids):
if result["SID"] not in known_sids:
# There could be a user in another domain with the same name,
# we just ignore this.
return None
response = {"username": result["Name"],
"domain": result["Domain"],
"sid": result["SID"],
"homedir": homedir}
profile_folders = self.GetSpecialFolders(sid)
if not profile_folders:
# TODO(user): The user's registry file is not mounted. The right
# way would be to open the ntuser.dat and parse the keys from there
# but we don't have registry file reading capability yet. For now,
# we just try to guess the folders.
folders_found = {}
for (_, folder, field) in self.special_folders:
path = os.path.join(homedir, folder)
try:
os.stat(path)
folders_found[field] = path
except exceptions.WindowsError:
pass
profile_folders = rdf_client.FolderInformation(**folders_found)
response["special_folders"] = profile_folders
return response
def Run(self, unused_args):
"""Enumerate all users on this machine."""
self.special_folders = constants.profile_folders
homedirs = self.GetUsersAndHomeDirs()
known_sids = [sid for (_, sid, _) in homedirs]
for (user, sid, homedir) in homedirs:
# This query determines if the sid corresponds to a real user account.
for result in RunWMIQuery("SELECT * FROM Win32_UserAccount "
"WHERE name=\"%s\"" % user):
response = self.GetWMIAccount(result, sid, homedir, known_sids)
if response:
self.SendReply(**response)
class EnumerateInterfaces(actions.ActionPlugin):
"""Enumerate all MAC addresses of all NICs.
Win32_NetworkAdapterConfiguration definition:
http://msdn.microsoft.com/en-us/library/aa394217(v=vs.85).aspx
"""
out_rdfvalue = rdf_client.Interface
def RunNetAdapterWMIQuery(self):
pythoncom.CoInitialize()
for interface in wmi.WMI().Win32_NetworkAdapterConfiguration(IPEnabled=1):
addresses = []
for ip_address in interface.IPAddress:
addresses.append(rdf_client.NetworkAddress(
human_readable_address=ip_address))
args = {"ifname": interface.Description}
args["mac_address"] = binascii.unhexlify(
interface.MACAddress.replace(":", ""))
if addresses:
args["addresses"] = addresses
yield args
def Run(self, unused_args):
"""Enumerate all MAC addresses."""
for interface_dict in self.RunNetAdapterWMIQuery():
self.SendReply(**interface_dict)
class EnumerateFilesystems(actions.ActionPlugin):
"""Enumerate all unique filesystems local to the system."""
out_rdfvalue = rdf_client.Filesystem
def Run(self, unused_args):
"""List all local filesystems mounted on this system."""
for drive in win32api.GetLogicalDriveStrings().split("\x00"):
if drive:
try:
volume = win32file.GetVolumeNameForVolumeMountPoint(
drive).rstrip("\\")
label, _, _, _, fs_type = win32api.GetVolumeInformation(drive)
self.SendReply(device=volume,
mount_point="/%s:/" % drive[0],
type=fs_type, label=UnicodeFromCodePage(label))
except win32api.error:
pass
class Uninstall(actions.ActionPlugin):
"""Remove the service that starts us at startup."""
out_rdfvalue = rdf_protodict.DataBlob
def Run(self, unused_arg):
"""This kills us with no cleanups."""
logging.debug("Disabling service")
win32serviceutil.ChangeServiceConfig(
None, config_lib.CONFIG["Nanny.service_name"],
startType=win32service.SERVICE_DISABLED)
svc_config = QueryService(config_lib.CONFIG["Nanny.service_name"])
if svc_config[1] == win32service.SERVICE_DISABLED:
logging.info("Disabled service successfully")
self.SendReply(string="Service disabled.")
else:
self.SendReply(string="Service failed to disable.")
def QueryService(svc_name):
"""Query service and get its config."""
hscm = win32service.OpenSCManager(None, None,
win32service.SC_MANAGER_ALL_ACCESS)
result = None
try:
hs = win32serviceutil.SmartOpenService(hscm, svc_name,
win32service.SERVICE_ALL_ACCESS)
result = win32service.QueryServiceConfig(hs)
win32service.CloseServiceHandle(hs)
finally:
win32service.CloseServiceHandle(hscm)
return result
class WmiQuery(actions.ActionPlugin):
"""Runs a WMI query and returns the results to a server callback."""
in_rdfvalue = rdf_client.WMIRequest
out_rdfvalue = rdf_protodict.Dict
def Run(self, args):
"""Run the WMI query and return the data."""
query = args.query
base_object = args.base_object or r"winmgmts:\root\cimv2"
if not query.upper().startswith("SELECT "):
raise RuntimeError("Only SELECT WMI queries allowed.")
for response_dict in RunWMIQuery(query, baseobj=base_object):
self.SendReply(response_dict)
def RunWMIQuery(query, baseobj=r"winmgmts:\root\cimv2"):
"""Run a WMI query and return a result.
Args:
query: the WMI query to run.
baseobj: the base object for the WMI query.
Yields:
rdf_protodict.Dicts containing key value pairs from the resulting COM
objects.
"""
pythoncom.CoInitialize() # Needs to be called if using com from a thread.
wmi_obj = win32com.client.GetObject(baseobj)
# This allows our WMI to do some extra things, in particular
# it gives it access to find the executable path for all processes.
wmi_obj.Security_.Privileges.AddAsString("SeDebugPrivilege")
# Run query
try:
query_results = wmi_obj.ExecQuery(query)
except pythoncom.com_error as e:
raise RuntimeError("Failed to run WMI query \'%s\' err was %s" %
(query, e))
# Extract results from the returned COMObject and return dicts.
try:
for result in query_results:
response = rdf_protodict.Dict()
for prop in result.Properties_:
if prop.Name not in IGNORE_PROPS:
# Protodict can handle most of the types we care about, but we may
# get some objects that we don't know how to serialize, so we tell the
# dict to set the value to an error message and keep going
response.SetItem(prop.Name, prop.Value, raise_on_error=False)
yield response
except pythoncom.com_error as e:
raise RuntimeError("WMI query data error on query \'%s\' err was %s" %
(query, e))
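# A minimal usage sketch; the WMI query text is illustrative:
#   for row in RunWMIQuery("SELECT Name, ProcessId FROM Win32_Process"):
#       print row["Name"], row["ProcessId"]   # each row is an rdf_protodict.Dict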
def CtlCode(device_type, function, method, access):
"""Prepare an IO control code."""
return (device_type << 16) | (access << 14) | (function << 2) | method
# IOCTLS for interacting with the driver.
INFO_IOCTRL = CtlCode(0x22, 0x100, 0, 3) # Get information.
CTRL_IOCTRL = CtlCode(0x22, 0x101, 0, 3) # Set acquisition modes.
class UninstallDriver(actions.ActionPlugin):
"""Unloads and deletes a memory driver.
Note that only drivers with a signature that validates with
Client.driver_signing_public_key can be uninstalled.
"""
in_rdfvalue = rdf_client.DriverInstallTemplate
@staticmethod
def UninstallDriver(driver_path, service_name, delete_file=False):
"""Unloads the driver and delete the driver file.
Args:
driver_path: Full path name to the driver file.
service_name: Name of the service the driver is loaded as.
delete_file: Should we delete the driver file after removing the service.
Raises:
OSError: On failure to uninstall or delete.
"""
try:
win32serviceutil.StopService(service_name)
except pywintypes.error as e:
if e[0] not in [winerror.ERROR_SERVICE_NOT_ACTIVE,
winerror.ERROR_SERVICE_DOES_NOT_EXIST]:
raise OSError("Could not stop service: {0}".format(e))
try:
win32serviceutil.RemoveService(service_name)
except pywintypes.error as e:
if e[0] != winerror.ERROR_SERVICE_DOES_NOT_EXIST:
raise OSError("Could not remove service: {0}".format(e))
if delete_file:
try:
if os.path.exists(driver_path):
os.remove(driver_path)
except (OSError, IOError) as e:
raise OSError("Driver deletion failed: " + str(e))
def Run(self, args):
"""Unloads a driver."""
# This is kind of lame because we don't really check that the driver is
# the same as the one that we are going to uninstall.
args.driver.Verify(config_lib.CONFIG["Client.driver_signing_public_key"])
self.UninstallDriver(driver_path=None, service_name=args.driver_name,
delete_file=False)
class InstallDriver(UninstallDriver):
"""Installs a driver.
Note that only drivers with a signature that validates with
Client.driver_signing_public_key can be loaded.
"""
in_rdfvalue = rdf_client.DriverInstallTemplate
@staticmethod
def InstallDriver(driver_path, service_name, driver_display_name):
"""Loads a driver and start it."""
hscm = win32service.OpenSCManager(None, None,
win32service.SC_MANAGER_ALL_ACCESS)
try:
win32service.CreateService(hscm,
service_name,
driver_display_name,
win32service.SERVICE_ALL_ACCESS,
win32service.SERVICE_KERNEL_DRIVER,
win32service.SERVICE_DEMAND_START,
win32service.SERVICE_ERROR_IGNORE,
driver_path,
None, # No load ordering
0, # No Tag identifier
None, # Service deps
None, # User name
None) # Password
win32serviceutil.StartService(service_name)
except pywintypes.error as e:
# The following errors are expected:
if e[0] not in [winerror.ERROR_SERVICE_EXISTS,
winerror.ERROR_SERVICE_MARKED_FOR_DELETE]:
raise RuntimeError("StartService failure: {0}".format(e))
def Run(self, args):
"""Initializes the driver."""
self.SyncTransactionLog()
# This will raise if the signature is bad.
args.driver.Verify(config_lib.CONFIG["Client.driver_signing_public_key"])
if args.force_reload:
try:
self.UninstallDriver(None, args.driver_name, delete_file=False)
except Exception as e: # pylint: disable=broad-except
logging.debug("Error uninstalling driver: %s", e)
path_handle, path_name = tempfile.mkstemp(suffix=".sys")
try:
# TODO(user): Ensure we have lock here, no races
logging.info("Writing driver to %s", path_name)
# Note permissions default to global read, user only write.
try:
os.write(path_handle, args.driver.data)
finally:
os.close(path_handle)
self.InstallDriver(path_name, args.driver_name, args.driver_display_name)
finally:
os.unlink(path_name)
class UpdateAgent(standard.ExecuteBinaryCommand):
"""Updates the GRR agent to a new version."""
# For Windows this is just an alias to ExecuteBinaryCommand.
| apache-2.0 |
sevo/closure_decorator | decorator/tests.py | 1 | 2736 | import unittest
import exercises as e
from functools import reduce
from operator import add
class DecoratorTests(unittest.TestCase):
def freeze(self, *args):
return tuple(args)
def print_output(self, index=0):
return e.print.__closure__[1].cell_contents[index]
def local_variable_value(self, fct, index=0):
return fct.__closure__[index].cell_contents
def setUp(self):
for _ in range(len(e.print.__closure__[1].cell_contents)):
del e.print.__closure__[1].cell_contents[0]
# Helper test that checks whether everything works as it should. This is not a test for the exercises.
def test_simple(self):
e.simple('pokus')
self.assertEqual(self.print_output(), self.freeze('pokus'))
self.setUp()
e.simple('pokus')
e.simple('pokus2')
self.assertEqual(self.print_output(), self.freeze('pokus'))
self.assertEqual(self.print_output(1), self.freeze('pokus2'))
# Task 1
def test_to_be_implemented_decorator(self):
@e.to_be_implemented_decorator
def fct():
pass
fct()
self.assertEqual(self.print_output(), self.freeze('To be implemented.'))
# Task 2
def test_starting_decorator(self):
@e.starting_decorator
def fct(str):
e.print(str)
fct('haha')
self.assertEqual(self.print_output(), self.freeze('fct: Starting'))
self.assertEqual(self.print_output(1), self.freeze('haha'))
# Task 3
def test_count_decorator(self):
@e.count_decorator
def fct():
pass
fct()
self.assertEqual(self.local_variable_value(fct), 1)
for _ in range(0, 10):
fct()
self.assertEqual(self.local_variable_value(fct), 11)
# Task 4
def test_memoize(self):
def wrapper():
counter = 0
def fct(i):
nonlocal counter
counter += 1
return i**2
return fct
fct = e.memoize(wrapper())
self.assertEqual(fct(5), 25)
self.assertEqual(self.local_variable_value(self.local_variable_value(fct, 0), 0), 1)
for _ in range(10):
fct(6)
self.assertEqual(self.local_variable_value(self.local_variable_value(fct, 0), 0), 2)
# Task 5
def test_test_array_of_positives(self):
@e.test_array_of_positives
def sum(array):
return reduce(add, array)
self.assertEqual(sum([1,2,3,4]), 10)
with self.assertRaises(ValueError):
sum()
with self.assertRaises(ValueError):
sum([-1,2,3,4])
with self.assertRaises(ValueError):
sum(['0',2,3,4])
| mit |
sspickle/assessdb | webapp/assessdb/rest_services.py | 1 | 3384 | """
Cornice Services for REST API
"""
import json
import traceback
import sqlalchemy as sa
from pyramid.response import Response
from .models import (
Person,
Instrument,
InstrumentItems,
Course,
Item,
Answer,
ItemAnswers,
)
from cornice import Service
person = Service(name='person', path='/restapi/persons/{id}', description="Person Service")
people = Service(name='people', path='/restapi/persons', description="People Service")
@person.get()
def get_person_info(request):
"""Get info for a person object"""
pid = request.matchdict.get('id','')
p=request.dbsession.query(Person).filter(Person.id==pid).first()
return {'id':p.id, 'last':p.last, 'first':p.first, 'upid':p.upid, 'email':p.email}
@people.get()
def get_people_info(request):
"""Get a collection of person objects"""
result = request.dbsession.query(Person).order_by(Person.last.asc(),Person.first.asc()).all()
results=[]
for p in result:
results.append({'id':p.id, 'last':p.last, 'firstName':p.first, 'upid':p.upid, 'email':p.email})
return results
instrument = Service(name='instrument', path='/restapi/instruments/{id}', description="Instrument Service")
instruments = Service(name='instruments', path='/restapi/instruments', description="Instruments Service")
@instrument.get()
def get_instrument_info(request):
"""Get info for an instrument object"""
pid = request.matchdict.get('id','')
p=request.dbsession.query(Instrument).filter(Instrument.id==pid).first()
return {'id':p.id, 'name':p.name, 'description':p.description}
@instruments.get()
def get_people_info(request):
"""Get a collection of person objects"""
result = request.dbsession.query(Instrument).order_by(Instrument.name.asc()).all()
results=[]
for p in result:
results.append({'id':p.id, 'name':p.name, 'description':p.description})
return results
course = Service(name='course', path='/restapi/courses/{id}', description="Course Service")
courses = Service(name='courses', path='/restapi/courses', description="Courses Service")
@course.get()
def get_course_info(request):
"""Get info for an instrument object"""
cid = request.matchdict.get('id','')
c=request.dbsession.query(Course).filter(Course.id==cid).first()
return {'id':c.id, 'subject':c.subject, 'num':c.num, 'sect':c.sect, 'term':c.term, 'CRN':c.CRN}
@courses.get()
def get_courses_info(request):
"""Get a collection of person objects"""
result = request.dbsession.query(Course).order_by(Course.CRN.asc()).all()
results=[]
for c in result:
results.append({'id':c.id, 'subject':c.subject, 'num':c.num, 'sect':c.sect, 'term':c.term, 'CRN':c.CRN})
return results
item = Service(name='item', path='/restapi/items/{id}', description="Item Service")
items = Service(name='items', path='/restapi/items', description="Items Service")
@item.get()
def get_item_info(request):
"""Get info for an instrument object"""
cid = request.matchdict.get('id','')
c=request.dbsession.query(Item).filter(Item.id==cid).first()
return {'id':c.id, 'markup':c.markup}
@items.get()
def get_items_info(request):
"""Get a collection of person objects"""
result = request.dbsession.query(Item).all()
results=[]
for c in result:
results.append({'id':c.id, 'markup':c.markup})
return results
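# Illustrative sketch of how the collection endpoints above behave over HTTP once the
# Pyramid app is running (response shapes follow the dicts built in the views; exact
# values are assumptions):
#   GET /restapi/persons      -> [{"id": ..., "last": ..., "firstName": ..., ...}, ...]
#   GET /restapi/courses/<id> -> {"id": ..., "subject": ..., "num": ..., "CRN": ...}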
| bsd-2-clause |
gentledevil/ansible | test/units/module_utils/test_database.py | 325 | 5737 | import collections
import mock
import os
import re
from nose.tools import eq_
try:
from nose.tools import assert_raises_regexp
except ImportError:
# Python < 2.7
def assert_raises_regexp(expected, regexp, callable, *a, **kw):
try:
callable(*a, **kw)
except expected as e:
if isinstance(regexp, basestring):
regexp = re.compile(regexp)
if not regexp.search(str(e)):
raise Exception('"%s" does not match "%s"' %
(regexp.pattern, str(e)))
else:
if hasattr(expected,'__name__'): excName = expected.__name__
else: excName = str(expected)
raise AssertionError("%s not raised" % excName)
from ansible.module_utils.database import (
pg_quote_identifier,
SQLParseError,
)
# Note: Using nose's generator test cases here so we can't inherit from
# unittest.TestCase
class TestQuotePgIdentifier(object):
# These are all valid strings
# The results are based on interpreting the identifier as a table name
valid = {
# User quoted
'"public.table"': '"public.table"',
'"public"."table"': '"public"."table"',
'"schema test"."table test"': '"schema test"."table test"',
# We quote part
'public.table': '"public"."table"',
'"public".table': '"public"."table"',
'public."table"': '"public"."table"',
'schema test.table test': '"schema test"."table test"',
'"schema test".table test': '"schema test"."table test"',
'schema test."table test"': '"schema test"."table test"',
# Embedded double quotes
'table "test"': '"table ""test"""',
'public."table ""test"""': '"public"."table ""test"""',
'public.table "test"': '"public"."table ""test"""',
'schema "test".table': '"schema ""test"""."table"',
'"schema ""test""".table': '"schema ""test"""."table"',
'"""wat"""."""test"""': '"""wat"""."""test"""',
# Sigh, handle these as well:
'"no end quote': '"""no end quote"',
'schema."table': '"schema"."""table"',
'"schema.table': '"""schema"."table"',
'schema."table.something': '"schema"."""table"."something"',
# Embedded dots
'"schema.test"."table.test"': '"schema.test"."table.test"',
'"schema.".table': '"schema."."table"',
'"schema."."table"': '"schema."."table"',
'schema.".table"': '"schema".".table"',
'"schema".".table"': '"schema".".table"',
'"schema.".".table"': '"schema.".".table"',
# These are valid but maybe not what the user intended
'."table"': '".""table"""',
'table.': '"table."',
}
invalid = {
('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema."table"','table'): 'User escaped identifiers must escape extra quotes',
('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
}
def check_valid_quotes(self, identifier, quoted_identifier):
eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier)
def test_valid_quotes(self):
for identifier in self.valid:
yield self.check_valid_quotes, identifier, self.valid[identifier]
def check_invalid_quotes(self, identifier, id_type, msg):
assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type))
def test_invalid_quotes(self):
for test in self.invalid:
yield self.check_invalid_quotes, test[0], test[1], self.invalid[test]
def test_how_many_dots(self):
eq_(pg_quote_identifier('role', 'role'), '"role"')
assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role'))
eq_(pg_quote_identifier('db', 'database'), '"db"')
assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database'))
eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"')
assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema'))
eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"')
assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table'))
eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"')
assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column'))
| gpl-3.0 |
alanljj/connector-telephony | base_phone/wizard/number_not_found.py | 12 | 5462 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Base Phone module for Odoo
# Copyright (C) 2010-2015 Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
import logging
import phonenumbers
_logger = logging.getLogger(__name__)
class number_not_found(orm.TransientModel):
_name = "number.not.found"
_description = "Number not found"
_columns = {
'calling_number': fields.char(
'Calling Number', size=64, readonly=True,
help="Phone number of calling party that has been obtained "
"from the telephony server, in the format used by the "
"telephony server (not E.164)."),
'e164_number': fields.char(
'E.164 Number', size=64,
help="E.164 equivalent of the calling number."),
'number_type': fields.selection(
[('phone', 'Fixed'), ('mobile', 'Mobile')],
'Fixed/Mobile', required=True),
'to_update_partner_id': fields.many2one(
'res.partner', 'Partner to Update',
help="Partner on which the phone number will be written"),
'current_partner_phone': fields.related(
'to_update_partner_id', 'phone', type='char',
relation='res.partner', string='Current Phone', readonly=True),
'current_partner_mobile': fields.related(
'to_update_partner_id', 'mobile', type='char',
relation='res.partner', string='Current Mobile', readonly=True),
}
def default_get(self, cr, uid, fields_list, context=None):
res = super(number_not_found, self).default_get(
cr, uid, fields_list, context=context
)
if not res:
res = {}
if res.get('calling_number'):
convert = self.pool['res.partner']._generic_reformat_phonenumbers(
cr, uid, None, {'phone': res.get('calling_number')},
context=context)
parsed_num = phonenumbers.parse(convert.get('phone'))
res['e164_number'] = phonenumbers.format_number(
parsed_num, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
number_type = phonenumbers.number_type(parsed_num)
if number_type == 1:
res['number_type'] = 'mobile'
else:
res['number_type'] = 'phone'
return res
def create_partner(self, cr, uid, ids, context=None):
'''Function called by the related button of the wizard'''
if context is None:
context = {}
wiz = self.browse(cr, uid, ids[0], context=context)
parsed_num = phonenumbers.parse(wiz.e164_number, None)
phonenumbers.number_type(parsed_num)
context['default_%s' % wiz.number_type] = wiz.e164_number
action = {
'name': _('Create New Partner'),
'view_mode': 'form,tree,kanban',
'res_model': 'res.partner',
'type': 'ir.actions.act_window',
'nodestroy': False,
'target': 'current',
'context': context,
}
return action
def update_partner(self, cr, uid, ids, context=None):
wiz = self.browse(cr, uid, ids[0], context=context)
if not wiz.to_update_partner_id:
raise orm.except_orm(
_('Error:'),
_("Select the Partner to Update."))
self.pool['res.partner'].write(
cr, uid, wiz.to_update_partner_id.id,
{wiz.number_type: wiz.e164_number}, context=context)
action = {
'name': _('Partner: %s' % wiz.to_update_partner_id.name),
'type': 'ir.actions.act_window',
'res_model': 'res.partner',
'view_mode': 'form,tree,kanban',
'nodestroy': False,
'target': 'current',
'res_id': wiz.to_update_partner_id.id,
'context': context,
}
return action
def onchange_to_update_partner(
self, cr, uid, ids, to_update_partner_id, context=None):
res = {'value': {}}
if to_update_partner_id:
to_update_partner = self.pool['res.partner'].browse(
cr, uid, to_update_partner_id, context=context)
res['value'].update({
'current_partner_phone': to_update_partner.phone,
'current_partner_mobile': to_update_partner.mobile,
})
else:
res['value'].update({
'current_partner_phone': False,
'current_partner_mobile': False,
})
return res
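# Illustrative example of the E.164 formatting used in default_get(); the number and
# the exact spacing of the output are assumptions:
#   parsed = phonenumbers.parse('+33141981242')
#   phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
#   # -> something like '+33 1 41 98 12 42'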
| agpl-3.0 |
okwow123/djangol2 | example/env/lib/python2.7/site-packages/requests/packages/urllib3/util/wait.py | 226 | 1451 | from .selectors import (
HAS_SELECT,
DefaultSelector,
EVENT_READ,
EVENT_WRITE
)
def _wait_for_io_events(socks, events, timeout=None):
""" Waits for IO events to be available from a list of sockets
or optionally a single socket if passed in. Returns a list of
sockets that can be interacted with immediately. """
if not HAS_SELECT:
raise ValueError('Platform does not have a selector')
if not isinstance(socks, list):
# Probably just a single socket.
if hasattr(socks, "fileno"):
socks = [socks]
# Otherwise it might be a non-list iterable.
else:
socks = list(socks)
with DefaultSelector() as selector:
for sock in socks:
selector.register(sock, events)
return [key[0].fileobj for key in
selector.select(timeout) if key[1] & events]
def wait_for_read(socks, timeout=None):
""" Waits for reading to be available from a list of sockets
or optionally a single socket if passed in. Returns a list of
sockets that can be read from immediately. """
return _wait_for_io_events(socks, EVENT_READ, timeout)
def wait_for_write(socks, timeout=None):
""" Waits for writing to be available from a list of sockets
or optionally a single socket if passed in. Returns a list of
sockets that can be written to immediately. """
return _wait_for_io_events(socks, EVENT_WRITE, timeout)
| mit |
loco-odoo/localizacion_co | openerp/netsvc.py | 37 | 8976 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import logging.handlers
import os
import pprint
import release
import sys
import threading
import psycopg2
import openerp
import sql_db
import tools
_logger = logging.getLogger(__name__)
def log(logger, level, prefix, msg, depth=None):
indent=''
indent_after=' '*len(prefix)
for line in (prefix + pprint.pformat(msg, depth=depth)).split('\n'):
logger.log(level, indent+line)
indent=indent_after
def LocalService(name):
"""
The openerp.netsvc.LocalService() function is deprecated. It still works
in two cases: workflows and reports. For workflows, instead of using
LocalService('workflow'), openerp.workflow should be used (better yet,
methods on openerp.osv.orm.Model should be used). For reports,
openerp.report.render_report() should be used (methods on the Model should
be provided too in the future).
"""
assert openerp.conf.deprecation.allow_local_service
_logger.warning("LocalService() is deprecated since march 2013 (it was called with '%s')." % name)
if name == 'workflow':
return openerp.workflow
if name.startswith('report.'):
report = openerp.report.interface.report_int._reports.get(name)
if report:
return report
else:
dbname = getattr(threading.currentThread(), 'dbname', None)
if dbname:
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
return registry['ir.actions.report.xml']._lookup_report(cr, name[len('report.'):])
path_prefix = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
class PostgreSQLHandler(logging.Handler):
""" PostgreSQL Loggin Handler will store logs in the database, by default
the current database, can be set using --log-db=DBNAME
"""
def emit(self, record):
ct = threading.current_thread()
ct_db = getattr(ct, 'dbname', None)
dbname = tools.config['log_db'] or ct_db
if not dbname:
return
with tools.ignore(Exception), tools.mute_logger('openerp.sql_db'), sql_db.db_connect(dbname, allow_uri=True).cursor() as cr:
msg = tools.ustr(record.msg)
if record.args:
msg = msg % record.args
traceback = getattr(record, 'exc_text', '')
if traceback:
msg = "%s\n%s" % (msg, traceback)
# we do not use record.levelname because it may have been changed by ColoredFormatter.
levelname = logging.getLevelName(record.levelno)
val = ('server', ct_db, record.name, levelname, msg, record.pathname[len(path_prefix)+1:], record.lineno, record.funcName)
cr.execute("""
INSERT INTO ir_logging(create_date, type, dbname, name, level, message, path, line, func)
VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s)
""", val)
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
#The background is set with 40 plus the number of the color, and the foreground with 30
#These are the sequences needed to get colored output
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
LEVEL_COLOR_MAPPING = {
logging.DEBUG: (BLUE, DEFAULT),
logging.INFO: (GREEN, DEFAULT),
logging.WARNING: (YELLOW, DEFAULT),
logging.ERROR: (RED, DEFAULT),
logging.CRITICAL: (WHITE, RED),
}
class DBFormatter(logging.Formatter):
def format(self, record):
record.pid = os.getpid()
record.dbname = getattr(threading.currentThread(), 'dbname', '?')
return logging.Formatter.format(self, record)
class ColoredFormatter(DBFormatter):
def format(self, record):
fg_color, bg_color = LEVEL_COLOR_MAPPING.get(record.levelno, (GREEN, DEFAULT))
record.levelname = COLOR_PATTERN % (30 + fg_color, 40 + bg_color, record.levelname)
return DBFormatter.format(self, record)
_logger_init = False
def init_logger():
global _logger_init
if _logger_init:
return
_logger_init = True
logging.addLevelName(25, "INFO")
from tools.translate import resetlocale
resetlocale()
# create a format for log messages and dates
format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s'
if tools.config['syslog']:
# SysLog Handler
if os.name == 'nt':
handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
else:
handler = logging.handlers.SysLogHandler()
format = '%s %s' % (release.description, release.version) \
+ ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'
elif tools.config['logfile']:
# LogFile Handler
logf = tools.config['logfile']
try:
# We check we have the right location for the log files
dirname = os.path.dirname(logf)
if dirname and not os.path.isdir(dirname):
os.makedirs(dirname)
if tools.config['logrotate'] is not False:
handler = logging.handlers.TimedRotatingFileHandler(filename=logf, when='D', interval=1, backupCount=30)
elif os.name == 'posix':
handler = logging.handlers.WatchedFileHandler(logf)
else:
handler = logging.FileHandler(logf)
except Exception:
sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")
handler = logging.StreamHandler(sys.stdout)
else:
# Normal Handler on standard output
handler = logging.StreamHandler(sys.stdout)
# Check that handler.stream has a fileno() method: when running OpenERP
# behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
# which has no fileno() method. (mod_wsgi.Log is what is being bound to
# sys.stderr when the logging.StreamHandler is being constructed above.)
def is_a_tty(stream):
return hasattr(stream, 'fileno') and os.isatty(stream.fileno())
if os.name == 'posix' and isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
formatter = ColoredFormatter(format)
else:
formatter = DBFormatter(format)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
if tools.config['log_db']:
postgresqlHandler = PostgreSQLHandler()
postgresqlHandler.setLevel(25)
logging.getLogger().addHandler(postgresqlHandler)
# Configure loggers levels
pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])
logconfig = tools.config['log_handler']
logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
for logconfig_item in logging_configurations:
loggername, level = logconfig_item.split(':')
level = getattr(logging, level, logging.INFO)
logger = logging.getLogger(loggername)
logger.setLevel(level)
for logconfig_item in logging_configurations:
_logger.debug('logger level set: "%s"', logconfig_item)
DEFAULT_LOG_CONFIGURATION = [
'openerp.workflow.workitem:WARNING',
'openerp.http.rpc.request:INFO',
'openerp.http.rpc.response:INFO',
'openerp.addons.web.http:INFO',
'openerp.sql_db:INFO',
':INFO',
]
PSEUDOCONFIG_MAPPER = {
'debug_rpc_answer': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG', 'openerp.http.rpc.response:DEBUG'],
'debug_rpc': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG'],
'debug': ['openerp:DEBUG'],
'debug_sql': ['openerp.sql_db:DEBUG'],
'info': [],
'warn': ['openerp:WARNING', 'werkzeug:WARNING'],
'error': ['openerp:ERROR', 'werkzeug:ERROR'],
'critical': ['openerp:CRITICAL', 'werkzeug:CRITICAL'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
r39132/airflow | airflow/contrib/operators/jenkins_job_trigger_operator.py | 1 | 11097 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import socket
import json
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.jenkins_hook import JenkinsHook
import jenkins
from jenkins import JenkinsException
from requests import Request
import six
from six.moves.urllib.error import HTTPError, URLError
def jenkins_request_with_headers(jenkins_server, req):
"""
We need the headers in addition to the response body
in order to get the location from them.
This function uses the jenkins_request method from the python-jenkins library
with just the return value changed.
:param jenkins_server: The server to query
:param req: The request to execute
:return: Dict containing the response body (key body)
and the headers coming along (headers)
"""
try:
response = jenkins_server.jenkins_request(req)
response_body = response.content
response_headers = response.headers
if response_body is None:
raise jenkins.EmptyResponseException(
"Error communicating with server[%s]: "
"empty response" % jenkins_server.server)
return {'body': response_body.decode('utf-8'), 'headers': response_headers}
except HTTPError as e:
# Jenkins's funky authentication means it's nigh impossible to
# distinguish errors.
if e.code in [401, 403, 500]:
# six.moves.urllib.error.HTTPError provides a 'reason'
# attribute for all python version except for ver 2.6
# Falling back to HTTPError.msg since it contains the
# same info as reason
raise JenkinsException(
'Error in request. ' +
'Possibly authentication failed [%s]: %s' % (
e.code, e.msg)
)
elif e.code == 404:
raise jenkins.NotFoundException('Requested item could not be found')
else:
raise
except socket.timeout as e:
raise jenkins.TimeoutException('Error in request: %s' % e)
except URLError as e:
# python 2.6 compatibility to ensure same exception raised
# since URLError wraps a socket timeout on python 2.6.
if str(e.reason) == "timed out":
raise jenkins.TimeoutException('Error in request: %s' % e.reason)
raise JenkinsException('Error in request: %s' % e.reason)
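# A minimal usage sketch; the connection id and job name are illustrative:
#   server = JenkinsHook('jenkins_default').get_jenkins_server()
#   req = Request(server.build_job_url('my-job', None, None))
#   answer = jenkins_request_with_headers(server, req)
#   location = answer['headers']['Location']   # queue item to poll for the build number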
class JenkinsJobTriggerOperator(BaseOperator):
"""
Trigger a Jenkins Job and monitor its execution.
This operator depends on the python-jenkins library,
version >= 0.4.15, to communicate with the jenkins server.
You'll also need to configure a Jenkins connection in the connections screen.
:param jenkins_connection_id: The jenkins connection to use for this job
:type jenkins_connection_id: str
:param job_name: The name of the job to trigger
:type job_name: str
:param parameters: The parameters block to provide to jenkins. (templated)
:type parameters: str
:param sleep_time: How long will the operator sleep between each status
request for the job (min 1, default 10)
:type sleep_time: int
:param max_try_before_job_appears: The maximum number of requests to make
while waiting for the job to appear on the jenkins server (default 10)
:type max_try_before_job_appears: int
"""
template_fields = ('parameters',)
template_ext = ('.json',)
ui_color = '#f9ec86'
@apply_defaults
def __init__(self,
jenkins_connection_id,
job_name,
parameters="",
sleep_time=10,
max_try_before_job_appears=10,
*args,
**kwargs):
super(JenkinsJobTriggerOperator, self).__init__(*args, **kwargs)
self.job_name = job_name
self.parameters = parameters
if sleep_time < 1:
sleep_time = 1
self.sleep_time = sleep_time
self.jenkins_connection_id = jenkins_connection_id
self.max_try_before_job_appears = max_try_before_job_appears
def build_job(self, jenkins_server):
"""
This function makes an API call to Jenkins to trigger a build for 'job_name'
It returns a dict with 2 keys : body and headers.
headers also contains a dict-like object which can be queried to get
the location to poll in the queue.
:param jenkins_server: The jenkins server where the job should be triggered
:return: Dict containing the response body (key body)
and the headers coming along (headers)
"""
# Warning: if the parameters are too long, the URL can be longer than
# the maximum allowed size
if self.parameters and isinstance(self.parameters, six.string_types):
import ast
self.parameters = ast.literal_eval(self.parameters)
if not self.parameters:
# We need a None to call the non parametrized jenkins api end point
self.parameters = None
request = Request(jenkins_server.build_job_url(self.job_name,
self.parameters, None))
return jenkins_request_with_headers(jenkins_server, request)
def poll_job_in_queue(self, location, jenkins_server):
"""
This method polls the jenkins queue until the job is executed.
When we trigger a job through an API call,
the job is first put in the queue without having a build number assigned.
Thus we have to wait for the job to exit the queue to know its build number.
To do so, we have to add /api/json (or /api/xml) to the location
returned by the build_job call and poll this file.
When an 'executable' block appears in the json, it means the job execution started
and the field 'number' then contains the build number.
:param location: Location to poll, returned in the header of the build_job call
:param jenkins_server: The jenkins server to poll
:return: The build_number corresponding to the triggered job
"""
try_count = 0
location = location + '/api/json'
# TODO Use get_queue_info instead
# once it will be available in python-jenkins (v > 0.4.15)
self.log.info('Polling jenkins queue at the url %s', location)
while try_count < self.max_try_before_job_appears:
location_answer = jenkins_request_with_headers(jenkins_server,
Request(location))
if location_answer is not None:
json_response = json.loads(location_answer['body'])
if 'executable' in json_response:
build_number = json_response['executable']['number']
self.log.info('Job executed on Jenkins side with the build number %s',
build_number)
return build_number
try_count += 1
time.sleep(self.sleep_time)
raise AirflowException("The job hasn't been executed"
" after polling the queue %d times",
self.max_try_before_job_appears)
def get_hook(self):
return JenkinsHook(self.jenkins_connection_id)
def execute(self, context):
if not self.jenkins_connection_id:
self.log.error(
'Please specify the jenkins connection id to use. '
'You must create a Jenkins connection before'
' being able to use this operator')
raise AirflowException('The jenkins_connection_id parameter is missing, '
'impossible to trigger the job')
if not self.job_name:
self.log.error("Please specify the job name to use in the job_name parameter")
raise AirflowException('The job_name parameter is missing, '
'impossible to trigger the job')
self.log.info(
'Triggering the job %s on the jenkins : %s with the parameters : %s',
self.job_name, self.jenkins_connection_id, self.parameters)
jenkins_server = self.get_hook().get_jenkins_server()
jenkins_response = self.build_job(jenkins_server)
build_number = self.poll_job_in_queue(
jenkins_response['headers']['Location'], jenkins_server)
time.sleep(self.sleep_time)
keep_polling_job = True
build_info = None
while keep_polling_job:
try:
build_info = jenkins_server.get_build_info(name=self.job_name,
number=build_number)
if build_info['result'] is not None:
keep_polling_job = False
# Check if job had errors.
if build_info['result'] != 'SUCCESS':
raise AirflowException(
'Jenkins job failed, final state : %s.'
'Find more information on job url : %s'
% (build_info['result'], build_info['url']))
else:
self.log.info('Waiting for job to complete : %s , build %s',
self.job_name, build_number)
time.sleep(self.sleep_time)
except jenkins.NotFoundException as err:
raise AirflowException(
'Jenkins job status check failed. Final error was: %s'
% err.resp.status)
except jenkins.JenkinsException as err:
raise AirflowException(
'Jenkins call failed with error : %s, if you have parameters '
'double check them, jenkins sends back '
'this exception for unknown parameters. '
'You can also check logs for more details on this exception '
'(jenkins_url/log/rss)' % str(err))
if build_info:
# If we can we return the url of the job
# for later use (like retrieving an artifact)
return build_info['url']
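# A minimal DAG usage sketch; task id, connection id, job name and parameters are
# illustrative:
#   trigger = JenkinsJobTriggerOperator(
#       task_id='trigger_jenkins_build',
#       jenkins_connection_id='jenkins_default',
#       job_name='my-jenkins-job',
#       parameters={'param1': 'value1'},
#       sleep_time=30,
#       dag=dag)
# The operator returns the build URL, so a downstream task could fetch it via XCom.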
| apache-2.0 |
pratikmallya/hue | desktop/core/ext-py/lxml/doc/s5/ep2008/atom.py | 50 | 18452 | # ET is 80's!
#import elementtree as etree
# LXML is 00's!
from lxml import etree
from lxml.etree import tostring
#from dateutil.parser import parse as parse_date
from datetime import datetime
import uuid
import cgi
import copy
__all__ = [
'ATOM', 'atom_ns', 'Element', 'tostring']
ATOM_NAMESPACE = atom_ns = 'http://www.w3.org/2005/Atom'
app_ns = 'http://www.w3.org/2007/app'
xhtml_ns = 'http://www.w3.org/1999/xhtml'
nsmap = {'': atom_ns, 'app': app_ns}
_rel_alternate_xpath = etree.XPath(
"./atom:link[not(@rel) or @rel = 'alternate']",
namespaces=dict(atom=atom_ns))
_rel_other_xpath = etree.XPath(
"./atom:link[@rel = $rel]",
namespaces=dict(atom=atom_ns))
class AtomLookup(etree.CustomElementClassLookup):
_elements = {}
_app_elements = {}
def lookup(self, node_type, document, namespace, name):
if node_type == 'element':
if namespace == atom_ns:
return self._elements.get(name, AtomElement)
elif namespace == app_ns:
return self._app_elements.get(name, APPElement)
## FIXME: is this default good?
return AtomElement
# Otherwise normal lookup
return None
atom_parser = etree.XMLParser()
atom_parser.setElementClassLookup(AtomLookup())
def parse(input):
return etree.parse(input, atom_parser)
def ATOM(atom):
"""
Parse an Atom document
"""
return etree.XML(atom, atom_parser)
def Element(tag, *args, **kw):
"""
Create an Atom element. Adds the Atom namespace if no namespace
is given.
"""
if '{' not in tag:
# No namespace means the atom namespace
tag = '{%s}%s' % (atom_ns, tag)
return atom_parser.makeelement(tag, *args, **kw)
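# Illustrative example: Element('entry') is equivalent to
# Element('{http://www.w3.org/2005/Atom}entry'); only tags without an explicit
# namespace get the Atom namespace prepended.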
def _strftime(d):
"""
Format a date the way Atom likes it (RFC3339?)
"""
return d.strftime('%Y-%m-%dT%H:%M:%SZ%z')
## try:
## from lxml import builder
## except ImportError:
## pass
## else:
## E = builder.ElementMaker(parser=atom_parser,
## typemap={datetime: lambda e, v: _strftime(v)})
from lxml import builder
E = builder.ElementMaker(#parser=atom_parser,
typemap={datetime: lambda e, v: _strftime(v)})
__all__.append('E')
class NoDefault:
pass
class _LiveList(list):
"""
This list calls on_add or on_remove whenever the list is modified.
"""
on_add = on_remove = None
name = None
def __init__(self, *args, **kw):
on_add = on_remove = name = None
if 'on_add' in kw:
on_add = kw.pop('on_add')
if 'on_remove' in kw:
on_remove = kw.pop('on_remove')
if 'name' in kw:
name = kw.pop('name')
list.__init__(self, *args, **kw)
self.on_add = on_add
self.on_remove = on_remove
self.name = name
def _make_list(self, obj):
if not isinstance(obj, (list, tuple)):
obj = list(obj)
return obj
def _do_add(self, items):
if self.on_add is not None:
for item in items:
self.on_add(self, item)
def _do_remove(self, items):
if self.on_remove is not None:
for item in items:
self.on_remove(self, item)
def __setslice__(self, i, j, other):
other = self._make_list(other)
old = self[i:j]
list.__setslice__(self, i, j, other)
self._do_remove(old)
self._do_add(other)
def __delslice__(self, i, j):
old = self[i:j]
list.__delslice__(self, i, j)
self._do_remove(old)
    def __iadd__(self, other):
        other = self._make_list(other)
        list.__iadd__(self, other)
        self._do_add(other)
        return self
    def __imul__(self, n):
        # Repeat the current contents in place so on_add fires for each new
        # item; ``*=`` requires the method to return self.
        original = list(self)
        for _ in range(max(n, 1) - 1):
            self.extend(original)
        return self
def append(self, item):
list.append(self, item)
self._do_add([item])
def insert(self, i, item):
list.insert(self, i, item)
self._do_add([item])
def pop(self, i=-1):
item = self[i]
result = list.pop(self, i)
self._do_remove([item])
return result
def remove(self, item):
list.remove(self, item)
self._do_remove([item])
def extend(self, other):
for item in other:
self.append(item)
def __repr__(self):
name = self.name
if name is None:
name = '_LiveList'
return '%s(%s)' % (name, list.__repr__(self))
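# Note on the descriptors below: _findall_property returns a _LiveList whose
# on_add/on_remove callbacks append to / remove from the owning element, so
# list-style mutations of e.g. Feed.entries or Entry.categories are mirrored
# straight into the underlying XML tree.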
class _findall_property(object):
"""
Returns a LiveList of all the objects with the given tag. You can
append or remove items to the list to add or remove them from the
containing tag.
"""
def __init__(self, tag, ns=atom_ns):
self.tag = tag
self.ns = ns
        self.__doc__ = 'Return a live list of all the <atom:%s> elements' % self.tag
def __get__(self, obj, type=None):
if obj is None:
return self
def add(lst, item):
# FIXME: shouldn't just be an append
obj.append(item)
def remove(lst, item):
obj.remove(item)
return _LiveList(obj._atom_iter(self.tag, ns=self.ns),
on_add=add, on_remove=remove,
name='live_%s_list' % self.tag)
def __set__(self, obj, value):
cur = self.__get__(obj)
cur[:] = value
class _text_element_property(object):
"""
Creates an attribute that returns the text content of the given
subelement. E.g., ``title = _text_element_property('title')``
will make ``obj.title`` return the contents of the ``<title>``.
Similarly setting the attribute sets the text content of the
attribute.
"""
def __init__(self, tag, strip=True):
self.tag = tag
self.strip = strip
self.__doc__ = 'Access the <atom:%s> element as text' % self.tag
def __get__(self, obj, type=None):
if obj is None:
return self
v = obj._atom_findtext(self.tag)
if self.strip:
if v is not None:
v = v.strip()
else:
return ''
return v
def __set__(self, obj, value):
el = obj._get_or_create(self.tag)
el.text = value
def __delete__(self, obj):
el = obj._atom_get(self.tag)
        if el is not None:
# FIXME: should it be an error if it doesn't exist?
obj.remove(el)
class _element_property(object):
"""
Returns a single subelement based on tag. Setting the attribute
removes the element and adds a new one. Deleting it removes the
element.
"""
def __init__(self, tag):
self.tag = tag
self.__doc__ = 'Get the <atom:%s> element' % self.tag
def __get__(self, obj, type=None):
if obj is None:
return self
return obj._atom_get(self.tag)
def __set__(self, obj, value):
el = obj._atom_get(self.tag)
if el is not None:
parent = el.getparent()
index = parent.index(el)
parent[index] = value
else:
obj.append(value)
    def __delete__(self, obj):
el = obj._atom_get(self.tag)
if el is not None:
obj.remove(el)
class _attr_element_property(object):
"""
Get/set the value of the attribute on this element.
"""
def __init__(self, attr, default=NoDefault):
self.attr = attr
self.default = default
self.__doc__ = 'Access the %s attribute' % self.attr
def __get__(self, obj, type=None):
if obj is None:
return self
try:
return obj.attrib[self.attr]
except KeyError:
if self.default is not NoDefault:
return self.default
raise AttributeError(self.attr)
def __set__(self, obj, value):
if value is None:
self.__delete__(obj)
else:
obj.attrib[self.attr] = value
def __delete__(self, obj):
if self.attr in obj.attrib:
del obj.attrib[self.attr]
class _date_element_property(object):
"""
Get/set the parsed date value of the text content of a tag.
"""
def __init__(self, tag, ns=atom_ns):
self.tag = tag
self.ns = ns
self.__doc__ = 'Access the date in %s' % self.tag
def __get__(self, obj, type=None):
if obj is None:
return self
el = obj._atom_get(self.tag, ns=self.ns)
if el is None:
return None
return el.date
def __set__(self, obj, value):
el = obj._get_or_create(self.tag, ns=self.ns)
el.date = value
    def __delete__(self, obj):
el = obj._atom_get(self.tag)
if el is not None:
obj.remove(el)
class _date_text_property(object):
def __get__(self, obj, type=None):
if obj is None:
return self
return parse_date(obj.text)
def __set__(self, obj, value):
if not value:
obj.text = None
return
if isinstance(value, datetime):
value = _strftime(value)
obj.text = value
    def __delete__(self, obj):
obj.text = None
class AtomElement(etree.ElementBase):
def _get_or_create(self, tag, ns=atom_ns):
el = self.find('{%s}%s' % (ns, tag))
if el is None:
el = self.makeelement('{%s}%s' % (ns, tag))
self.append(el)
return el
def _atom_get(self, tag, ns=atom_ns):
for item in self._atom_iter(tag, ns=ns):
return item
return None
def _atom_iter(self, tag, ns=atom_ns):
return self.getiterator('{%s}%s' % (ns, tag))
def _atom_findtext(self, tag, ns=atom_ns):
return self.findtext('{%s}%s' % (ns, tag))
def _get_parent(self, tag, ns=atom_ns):
parent = self
while 1:
if parent.tag == '{%s}%s' % (ns, tag):
return parent
parent = parent.getparent()
if parent is None:
return None
@property
def feed(self):
return self._get_parent('feed')
def rel_links(self, rel='alternate'):
"""
Return all the links with the given ``rel`` attribute. The
        default relation is ``'alternate'``, and, as specified for Atom,
        links with no ``rel`` attribute are assumed to mean alternate.
"""
if rel is None:
return self._atom_iter('link')
return [
el for el in self._atom_iter('link')
if el.get('rel') == rel
or rel == 'alternate' and not el.get('rel')]
def __repr__(self):
tag = self.tag
if '}' in tag:
tag = tag.split('}', 1)[1]
return '<%s.%s atom:%s at %s>' % (
self.__class__.__module__,
self.__class__.__name__,
tag,
hex(abs(id(self)))[2:])
class Feed(AtomElement):
"""
For ``<feed>`` elements.
"""
@property
def feed(self):
return self
entries = _findall_property('entry')
title = _text_element_property('title')
author = _element_property('author')
class Entry(AtomElement):
"""
For ``<entry>`` elements.
"""
@property
def entry(self):
return self
id = _text_element_property('id')
title = _text_element_property('title')
published = _date_element_property('published')
updated = _date_element_property('updated')
edited = _date_element_property('edited', ns=app_ns)
def update_edited(self):
"""
Set app:edited to current time
"""
self.edited = datetime.utcnow()
def update_updated(self):
"""
Set atom:updated to the current time
"""
self.updated = datetime.utcnow()
def make_id(self):
"""
Create an artificial id for this entry
"""
assert not self.id, (
"You cannot make an id if one already exists")
self.id = 'uuid:%s' % uuid.uuid4()
def author__get(self):
el = self._atom_get('author')
if el is None:
if self.feed is not None:
return self.feed.author
return el
def author__set(self, value):
el = self._atom_get('author')
if el is not None:
self.remove(el)
self.append(value)
def author__del(self):
el = self._atom_get('author')
if el is not None:
self.remove(el)
author = property(author__get, author__set, author__del)
categories = _findall_property('category')
class _EntryElement(AtomElement):
@property
def entry(self):
return self._get_parent('entry')
class Category(_EntryElement):
"""
For ``<category>`` elements.
"""
term = _attr_element_property('term')
scheme = _attr_element_property('scheme', None)
label = _attr_element_property('label', None)
def as_string(self):
"""
Returns the string representation of the category, using the
GData convention of ``{scheme}term``
"""
if self.scheme is not None:
return '{%s}%s' % (self.scheme, self.term)
else:
return self.term
class PersonElement(_EntryElement):
"""
Represents authors and contributors
"""
email = _text_element_property('email')
uri = _text_element_property('uri')
name = _text_element_property('name')
class DateElement(_EntryElement):
"""
For elements that contain a date in their text content.
"""
date = _date_text_property()
class TextElement(_EntryElement):
type = _attr_element_property('type', None)
src = _attr_element_property('src', None)
def _html__get(self):
"""
        Gives the parsed HTML of the element's content. May return an
HtmlElement (from lxml.html) or an XHTML tree. If the element
is ``type="text"`` then it is returned as quoted HTML.
You can also set this attribute to either an lxml.html
element, an XHTML element, or an HTML string.
Raises AttributeError if this is not HTML content.
"""
## FIXME: should this handle text/html types?
if self.type == 'html':
content = self.text
elif self.type == 'text':
content = cgi.escape(self.text)
elif self.type == 'xhtml':
div = copy.deepcopy(self[0])
# Now remove the namespaces:
for el in div.getiterator():
if el.tag.startswith('{'):
el.tag = el.tag.split('}', 1)[1]
if div.tag.startswith('{'):
                div.tag = div.tag.split('}', 1)[1]
from lxml.html import tostring
content = tostring(div)
else:
raise AttributeError(
"Not an HTML or text content (type=%r)" % self.type)
from lxml.html import fromstring
return fromstring(content)
def _html__set(self, value):
if value is None:
del self.html
return
if isinstance(value, basestring):
# Some HTML text
self.type = 'html'
self.text = value
return
if value.tag.startswith('{%s}' % xhtml_ns):
if value.tag != '{%s}div' % xhtml_ns:
# Need to wrap it in a <div>
el = self.makeelement('{%s}div' % xhtml_ns)
el.append(value)
value = el
self[:] = []
self.type = 'xhtml'
self.append(value)
return
from lxml import html
if isinstance(value, html.HtmlElement):
value = tostring(value)
self[:] = []
self.type = 'html'
self.text = value
return
raise TypeError(
"Unknown HTML type: %s" % type(value))
def _html__del(self):
self.text = None
html = property(_html__get, _html__set, _html__del, doc=_html__get.__doc__)
def _binary__get(self):
"""
Gets/sets the binary content, which is base64 encoded in the
text.
"""
text = self.text
if text is None:
raise AttributeError(
"No text (maybe in src?)")
text = text.decode('base64')
return text
def _binary__set(self, value):
if isinstance(value, unicode):
## FIXME: is this kosher?
value = value.encode('utf8')
if not isinstance(value, str):
raise TypeError(
"Must set .binary to a str or unicode object (not %s)"
% type(value))
value = value.encode('base64')
self.text = value
def _binary__del(self):
self.text = None
binary = property(_binary__get, _binary__set, _binary__del, doc=_binary__get.__doc__)
class LinkElement(_EntryElement):
"""
For ``<link>`` elements.
"""
href = _attr_element_property('href', None)
rel = _attr_element_property('rel', None)
type = _attr_element_property('type', None)
title = _attr_element_property('title', None)
def __repr__(self):
return '<%s.%s at %s rel=%r href=%r>' % (
self.__class__.__module__,
self.__class__.__name__,
hex(abs(id(self)))[2:],
self.rel, self.href)
AtomLookup._elements.update(dict(
feed=Feed,
entry=Entry,
category=Category,
author=PersonElement,
contributor=PersonElement,
published=DateElement,
updated=DateElement,
content=TextElement,
summary=TextElement,
title=TextElement,
rights=TextElement,
subtitle=TextElement,
link=LinkElement,
))
class APPElement(etree.ElementBase):
def __repr__(self):
tag = self.tag
if '}' in tag:
tag = tag.split('}', 1)[1]
return '<%s.%s app:%s at %s>' % (
self.__class__.__module__,
self.__class__.__name__,
tag,
hex(abs(id(self)))[2:])
class Service(APPElement):
workspaces = _findall_property('workspace', ns=app_ns)
class Workspace(APPElement):
collections = _findall_property('collection', ns=app_ns)
class Collection(APPElement):
pass
class Edited(APPElement):
date = _date_text_property()
AtomLookup._app_elements.update(dict(
service=Service,
workspace=Workspace,
collection=Collection,
edited=Edited,
))
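# Minimal usage sketch (added for illustration; the feed markup is made up):
# parse an Atom string and use the element-class properties defined above.
if __name__ == '__main__':
    example = ATOM(
        '<feed xmlns="http://www.w3.org/2005/Atom">'
        '<title>Example feed</title>'
        '<entry><id>uuid:1234</id><title>First post</title></entry>'
        '</feed>')
    print(example.title)             # -> 'Example feed' (Feed.title property)
    for entry in example.entries:    # live list of <entry> elements
        print(entry.id + ': ' + entry.title)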
| apache-2.0 |
pratikmallya/hue | desktop/core/ext-py/boto-2.38.0/boto/manage/propget.py | 153 | 2502 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def get(prop, choices=None):
prompt = prop.verbose_name
if not prompt:
prompt = prop.name
if choices:
if callable(choices):
choices = choices()
else:
choices = prop.get_choices()
valid = False
while not valid:
if choices:
min = 1
max = len(choices)
for i in range(min, max+1):
value = choices[i-1]
if isinstance(value, tuple):
value = value[0]
print('[%d] %s' % (i, value))
value = raw_input('%s [%d-%d]: ' % (prompt, min, max))
try:
int_value = int(value)
value = choices[int_value-1]
if isinstance(value, tuple):
value = value[1]
valid = True
except ValueError:
print('%s is not a valid choice' % value)
except IndexError:
                print('%s is not within the range [%d-%d]' % (value, min, max))
else:
value = raw_input('%s: ' % prompt)
try:
value = prop.validate(value)
if prop.empty(value) and prop.required:
print('A value is required')
else:
valid = True
except:
print('Invalid value: %s' % value)
return value
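# Usage sketch (illustrative only): ``some_property`` stands for any boto
# model Property instance exposing verbose_name/name, get_choices(),
# validate(), empty() and required; the choices list is hypothetical.
#
#   instance_type = get(some_property, choices=['t1.micro', 'm1.small'])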
| apache-2.0 |
MeshGeometry/dxfio | Test/googletest/test/gtest_catch_exceptions_test.py | 2139 | 9901 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = '[email protected] (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
    * Exceptions thrown affect the remainder of the test work flow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
chrismattmann/tika-python | tika/translate.py | 1 | 3248 | #!/usr/bin/env python
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .tika import doTranslate1, callServer, Translator, ServerEndpoint
def from_file(filename, srcLang, destLang, serverEndpoint=ServerEndpoint, requestOptions={}):
'''
    Translates the content of a source file to the destination language
    :param filename: file whose contents need translation
    :param srcLang: language of the input file
    :param destLang: name of the desired target language
:param serverEndpoint: Tika server end point (Optional)
:return: translated content
'''
jsonOutput = doTranslate1(srcLang+':'+destLang, filename, serverEndpoint, requestOptions=requestOptions)
return jsonOutput[1]
def from_buffer(string, srcLang, destLang, serverEndpoint=ServerEndpoint, requestOptions={}):
'''
Translates content from source language to desired destination language
:param string: input content which needs translation
:param srcLang: name of language of the input content
:param destLang: name of the desired language for translation
    :param serverEndpoint: Tika server end point (Optional)
    :return: translated content
'''
    status, response = callServer('put', serverEndpoint, '/translate/all/'+Translator+'/'+srcLang+'/'+destLang,
                                  string, {'Accept': 'text/plain'}, False, requestOptions=requestOptions)
return response
def auto_from_file(filename, destLang, serverEndpoint=ServerEndpoint, requestOptions={}):
'''
Translates contents of a file to desired language by auto detecting the source language
    :param filename: file whose contents need translation
:param destLang: name of the desired language for translation
:param serverEndpoint: Tika server end point (Optional)
:return:
'''
jsonOutput = doTranslate1(destLang, filename, serverEndpoint, requestOptions=requestOptions)
return jsonOutput[1]
def auto_from_buffer(string, destLang, serverEndpoint=ServerEndpoint, requestOptions={}):
'''
Translates content to desired language by auto detecting the source language
:param string: input content which needs translation
:param destLang: name of the desired language for translation
:param serverEndpoint: Tika server end point (Optional)
    :return: translated content
'''
    status, response = callServer('put', serverEndpoint, '/translate/all/'+Translator+'/'+destLang,
                                  string, {'Accept': 'text/plain'}, False, requestOptions=requestOptions)
return response
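# Usage sketch (illustrative only; assumes a reachable Tika server and that
# the configured Translator supports the requested language pairs):
#
#   import tika.translate as translate
#   print(translate.from_buffer('Bonjour le monde', 'fr', 'en'))
#   print(translate.auto_from_file('/tmp/document.txt', 'en'))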
| apache-2.0 |
sumedhasingla/VTK | Imaging/Core/Testing/Python/ReslicePermuteSlab.py | 20 | 3561 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this script tests vtkImageReslice with different slab modes
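# Four vtkImageReslice instances are configured below, one per slab mode:
#   reslice1 - Mean (trapezoid integration, 45 slices, linear interpolation)
#   reslice2 - Sum  (93 slices, axes reoriented via direction cosines)
#   reslice3 - Max  (50 slices, nearest-neighbor interpolation)
#   reslice4 - Min  (2 slices, cubic interpolation)
# Each output is rendered in its own viewport of the shared render window.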
# Image pipeline
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataScalarTypeToUnsignedShort()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetDataOrigin(-100.8,-100.8,-70.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
caster = vtk.vtkImageCast()
caster.SetInputConnection(reader.GetOutputPort())
caster.SetOutputScalarTypeToFloat()
reslice1 = vtk.vtkImageReslice()
reslice1.SetInputConnection(reader.GetOutputPort())
reslice1.SetSlabModeToMean()
reslice1.SlabTrapezoidIntegrationOn()
reslice1.SetSlabNumberOfSlices(45)
reslice1.SetInterpolationModeToLinear()
reslice1.SetOutputDimensionality(2)
reslice1.SetOutputSpacing(3.2,3.2,1.5)
reslice1.SetOutputExtent(0,63,0,63,0,0)
reslice2 = vtk.vtkImageReslice()
reslice2.SetInputConnection(caster.GetOutputPort())
reslice2.SetSlabModeToSum()
reslice2.SetSlabNumberOfSlices(93)
reslice2.SetInterpolationModeToLinear()
reslice2.SetOutputDimensionality(2)
reslice2.SetOutputSpacing(3.2,3.2,1.5)
reslice2.SetOutputExtent(0,63,0,63,0,0)
reslice2.SetResliceAxesDirectionCosines([1,0,0,0,0,-1,0,1,0])
reslice3 = vtk.vtkImageReslice()
reslice3.SetInputConnection(reader.GetOutputPort())
reslice3.SetSlabModeToMax()
reslice3.SetInterpolationModeToNearestNeighbor()
reslice3.SetSlabNumberOfSlices(50)
reslice3.SetOutputDimensionality(2)
reslice3.SetOutputSpacing(3.2,3.2,1.5)
reslice3.SetOutputExtent(0,63,0,63,0,0)
reslice3.SetResliceAxesDirectionCosines([0,+1,0,0,0,-1,-1,0,0])
reslice4 = vtk.vtkImageReslice()
reslice4.SetInputConnection(reader.GetOutputPort())
reslice4.SetSlabModeToMin()
reslice4.SetSlabNumberOfSlices(2)
reslice4.SetInterpolationModeToCubic()
reslice4.SetOutputDimensionality(2)
reslice4.SetOutputSpacing(3.2,3.2,1.5)
reslice4.SetOutputExtent(0,63,0,63,0,0)
reslice4.SetResliceAxesDirectionCosines([0,0,1,0,1,0,-1,0,0])
mapper1 = vtk.vtkImageMapper()
mapper1.SetInputConnection(reslice1.GetOutputPort())
mapper1.SetColorWindow(2000)
mapper1.SetColorLevel(1000)
mapper1.SetZSlice(0)
mapper2 = vtk.vtkImageMapper()
mapper2.SetInputConnection(reslice2.GetOutputPort())
mapper2.SetColorWindow(50000)
mapper2.SetColorLevel(100000)
mapper2.SetZSlice(0)
mapper3 = vtk.vtkImageMapper()
mapper3.SetInputConnection(reslice3.GetOutputPort())
mapper3.SetColorWindow(2000)
mapper3.SetColorLevel(1000)
mapper3.SetZSlice(0)
mapper4 = vtk.vtkImageMapper()
mapper4.SetInputConnection(reslice4.GetOutputPort())
mapper4.SetColorWindow(2000)
mapper4.SetColorLevel(1000)
mapper4.SetZSlice(0)
actor1 = vtk.vtkActor2D()
actor1.SetMapper(mapper1)
actor2 = vtk.vtkActor2D()
actor2.SetMapper(mapper2)
actor3 = vtk.vtkActor2D()
actor3.SetMapper(mapper3)
actor4 = vtk.vtkActor2D()
actor4.SetMapper(mapper4)
imager1 = vtk.vtkRenderer()
imager1.AddActor2D(actor1)
imager1.SetViewport(0.5,0.0,1.0,0.5)
imager2 = vtk.vtkRenderer()
imager2.AddActor2D(actor2)
imager2.SetViewport(0.0,0.0,0.5,0.5)
imager3 = vtk.vtkRenderer()
imager3.AddActor2D(actor3)
imager3.SetViewport(0.5,0.5,1.0,1.0)
imager4 = vtk.vtkRenderer()
imager4.AddActor2D(actor4)
imager4.SetViewport(0.0,0.5,0.5,1.0)
imgWin = vtk.vtkRenderWindow()
imgWin.AddRenderer(imager1)
imgWin.AddRenderer(imager2)
imgWin.AddRenderer(imager3)
imgWin.AddRenderer(imager4)
imgWin.SetSize(150,128)
imgWin.Render()
# --- end of script --
| bsd-3-clause |
mtyka/pd | examples/restraint/run.py | 1 | 3081 | # PD is a free, modular C++ library for biomolecular simulation with a
# flexible and scriptable Python interface.
# Copyright (C) 2003-2013 Mike Tyka and Jon Rea
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pd import *
cseed(4)
info()
timer()
ffps = FFParamSet()
ffps.readLib("amber03aa.ff")
## test simple loading of PDB file
sim = PDB_In(ffps, "../pdb/trpcage.pdb");
sim.loadAll();
# create workspace
wspace = WorkSpace( sim )
# print loaded system - this can be compared later
wspace.printPDB("inout.pdb")
# create a few common forcefields
ff = Forcefield(wspace)
# set up a simple vaccuum forcefield
bonds = FF_Bonded(wspace)
nb = FF_NonBonded(wspace)
nb.Cutoff = 12.0
nb.InnerCutoff = 9.0
ff.add( bonds )
ff.add( nb )
# positionally restrain all atoms of residue 0
crest = FF_Restraint_Positional(wspace);
crest.setSelection( PickResidue( 0 ) );
crest.k = 20.0 ## restraint constant in kcal/mol/A^2
crest.detail()
ff.add(crest);
# positionally restrain all atoms of residue 0
irest = FF_Restraint_Internal(wspace);
irest.setSelection( PickMolecule( 0 ) );
irest.k = 20.0 ## restraint constant in kcal/mol/A^2
irest.detail()
ff.add(irest);
# positionally restrain all atoms of residue 0
trest = FF_Restraint_Torsional(wspace);
trest.OneRestraintPerBond = True
trest.setSelection( PickResidue( 0 ) );
trest.k = 0.01 ## restraint constant in kcal/mol/rad
trest.info()
trest.detail()
ff.add(trest);
# special native contact restraint
ncrest = FF_Restraint_NativeContact(wspace);
ncrest.setSelection( PickBackbone() );
ncrest.k = 1000 ## restraint constant in kcal/mol/rad
ncrest.info()
ncrest.detail()
ff.add(ncrest);
# special native contact restraint
atrest = FF_Restraint_AtomDistance(wspace);
atrest.k = 100 ## restraint constant in kcal/mol/rad
atrest.Dist_ij = 8; ## restrain to a distance of 8 A, the distance between
atrest.Atom_i = 1; ## Atom 1
atrest.Atom_j = 60; ## and Atom 60
atrest.info()
ff.add(atrest);
## print energies as summary
ff.printEnergySummary()
ff.printEnergyByAtom();
## also show parameters
ff.info()
tra1 = OutTra_NAMD("output",wspace);
wspace.addTra(tra1)
## do some minimisation and a little MD
min = Minimisation(ff)
min.Steps = 25
min.UpdateScr = 1
min.UpdateTra = 0
min.run()
ff.printEnergySummary()
md = MolecularDynamics(ff)
md.Steps = 1000
md.UpdateScr = 10
md.UpdateTra = 10
md.UpdateNList = 10
md.Integrator = MolecularDynamics.Langevin
md.setTargetTemp(300)
md.run()
ff.printEnergySummary()
| gpl-3.0 |
morreene/tradenews | venv/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py | 490 | 4141 | import calendar
import time
from email.utils import formatdate, parsedate, parsedate_tz
from datetime import datetime, timedelta
TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
def expire_after(delta, date=None):
date = date or datetime.now()
return date + delta
def datetime_to_header(dt):
return formatdate(calendar.timegm(dt.timetuple()))
class BaseHeuristic(object):
def warning(self, response):
"""
Return a valid 1xx warning header value describing the cache
adjustments.
        The response is provided to allow warnings like 113
        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
        to explicitly say the response is over 24 hours old.
"""
return '110 - "Response is Stale"'
def update_headers(self, response):
"""Update the response headers with any new headers.
NOTE: This SHOULD always include some Warning header to
signify that the response was cached by the client, not
by way of the provided headers.
"""
return {}
def apply(self, response):
updated_headers = self.update_headers(response)
if updated_headers:
response.headers.update(updated_headers)
warning_header_value = self.warning(response)
if warning_header_value is not None:
response.headers.update({'Warning': warning_header_value})
return response
class OneDayCache(BaseHeuristic):
"""
Cache the response by providing an expires 1 day in the
future.
"""
def update_headers(self, response):
headers = {}
if 'expires' not in response.headers:
date = parsedate(response.headers['date'])
expires = expire_after(timedelta(days=1),
date=datetime(*date[:6]))
headers['expires'] = datetime_to_header(expires)
headers['cache-control'] = 'public'
return headers
class ExpiresAfter(BaseHeuristic):
"""
Cache **all** requests for a defined time period.
"""
def __init__(self, **kw):
self.delta = timedelta(**kw)
def update_headers(self, response):
expires = expire_after(self.delta)
return {
'expires': datetime_to_header(expires),
'cache-control': 'public',
}
def warning(self, response):
tmpl = '110 - Automatically cached for %s. Response might be stale'
return tmpl % self.delta
class LastModified(BaseHeuristic):
"""
If there is no Expires header already, fall back on Last-Modified
using the heuristic from
http://tools.ietf.org/html/rfc7234#section-4.2.2
to calculate a reasonable value.
Firefox also does something like this per
https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
Unlike mozilla we limit this to 24-hr.
"""
cacheable_by_default_statuses = set([
200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501
])
def update_headers(self, resp):
headers = resp.headers
if 'expires' in headers:
return {}
if 'cache-control' in headers and headers['cache-control'] != 'public':
return {}
if resp.status not in self.cacheable_by_default_statuses:
return {}
if 'date' not in headers or 'last-modified' not in headers:
return {}
date = calendar.timegm(parsedate_tz(headers['date']))
last_modified = parsedate(headers['last-modified'])
if date is None or last_modified is None:
return {}
now = time.time()
current_age = max(0, now - date)
delta = date - calendar.timegm(last_modified)
freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
if freshness_lifetime <= current_age:
return {}
expires = date + freshness_lifetime
return {'expires': time.strftime(TIME_FMT, time.gmtime(expires))}
def warning(self, resp):
return None
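# Usage sketch (illustrative only; assumes the top-level ``cachecontrol``
# package, whose documented CacheControl wrapper accepts a ``heuristic``
# argument):
#
#   import requests
#   from cachecontrol import CacheControl
#
#   sess = CacheControl(requests.Session(), heuristic=ExpiresAfter(days=1))
#   resp = sess.get('http://example.com/')  # cached for up to one day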
| bsd-3-clause |
openstate/yournextrepresentative | candidates/tests/test_update_view.py | 2 | 20073 | from urlparse import urlsplit
from mock import patch
from django_webtest import WebTest
from .auth import TestUserMixin
from .helpers import equal_call_args
from .fake_popit import (
FakePersonCollection, FakePostCollection, fake_mp_post_search_results
)
example_timestamp = '2014-09-29T10:11:59.216159'
example_version_id = '5aa6418325c1a0bb'
@patch('candidates.popit.PopIt')
@patch.object(FakePersonCollection, 'put')
class TestUpdatePersonView(TestUserMixin, WebTest):
def test_update_person_view_get_without_login(self, mocked_person_put, mock_popit):
response = self.app.get('/person/2009/update')
self.assertEqual(response.status_code, 302)
split_location = urlsplit(response.location)
self.assertEqual('/accounts/login/', split_location.path)
self.assertEqual('next=/person/2009/update', split_location.query)
self.assertFalse(mocked_person_put.called)
def test_update_person_view_get_refused_copyright(self, mocked_person_put, mock_popit):
response = self.app.get('/person/2009/update', user=self.user_refused)
self.assertEqual(response.status_code, 302)
split_location = urlsplit(response.location)
self.assertEqual('/copyright-question', split_location.path)
self.assertEqual('next=/person/2009/update', split_location.query)
self.assertFalse(mocked_person_put.called)
@patch('candidates.popit.requests')
def test_update_person_view_get(self, mock_requests, mocked_person_put, mock_popit):
mock_popit.return_value.persons = FakePersonCollection
mock_requests.get.side_effect = fake_mp_post_search_results
# For the moment just check that the form's actually there:
response = self.app.get('/person/2009/update', user=self.user)
response.forms['person-details']
self.assertFalse(mocked_person_put.called)
@patch('candidates.popit.requests')
@patch('candidates.views.version_data.get_current_timestamp')
@patch('candidates.views.version_data.create_version_id')
def test_update_person_submission_copyright_refused(
self,
mock_create_version_id,
mock_get_current_timestamp,
mock_requests,
mocked_person_put,
mock_popit):
mock_popit.return_value.persons = FakePersonCollection
mock_get_current_timestamp.return_value = example_timestamp
mock_create_version_id.return_value = example_version_id
mock_requests.get.side_effect = fake_mp_post_search_results
response = self.app.get('/person/2009/update', user=self.user)
form = response.forms['person-details']
form['wikipedia_url'] = 'http://en.wikipedia.org/wiki/Tessa_Jowell'
form['party_gb_2015'] = 'party:90'
form['party_ni_2015'] = 'party:none'
form['source'] = "Some source of this information"
submission_response = form.submit(user=self.user_refused)
split_location = urlsplit(submission_response.location)
self.assertEqual('/copyright-question', split_location.path)
self.assertEqual('next=/person/2009/update', split_location.query)
self.assertFalse(mocked_person_put.called)
@patch('candidates.popit.requests')
@patch('candidates.views.version_data.get_current_timestamp')
@patch('candidates.views.version_data.create_version_id')
def test_update_person_submission(
self,
mock_create_version_id,
mock_get_current_timestamp,
mock_requests,
mocked_person_put,
mock_popit):
mock_popit.return_value.persons = FakePersonCollection
mock_popit.return_value.posts = FakePostCollection
mock_get_current_timestamp.return_value = example_timestamp
mock_create_version_id.return_value = example_version_id
mock_requests.get.side_effect = fake_mp_post_search_results
response = self.app.get(
'/person/2009/update',
user=self.user_who_can_lock,
)
form = response.forms['person-details']
form['wikipedia_url'] = 'http://en.wikipedia.org/wiki/Tessa_Jowell'
form['party_gb_2015'] = 'party:90'
form['party_ni_2015'] = 'party:none'
form['source'] = "Some source of this information"
submission_response = form.submit()
expected_purging_put = {
u'slug': u'tessa-jowell',
u'contact_details': [],
u'name': u'Tessa Jowell',
u'links': [],
u'honorific_suffix': u'DBE',
u'url': u'http://candidates.127.0.0.1.xip.io:3000/api/v0.1/persons/2009',
u'gender':
u'female',
u'identifiers': [
{
u'scheme': u'yournextmp-candidate',
u'id': u'544e3df981b7fa64bfccdaac',
u'identifier': u'2009'
},
{
u'scheme': u'uk.org.publicwhip',
u'id': u'54d2d3725b6aac303dfcd68b',
u'identifier': u'uk.org.publicwhip/person/10326'
}
],
u'other_names': [],
u'html_url': u'http://candidates.127.0.0.1.xip.io:3000/persons/2009',
u'standing_in': None,
u'honorific_prefix': u'Ms',
u'phone': u'02086931826',
u'versions': [
{
'information_source': u'Some source of this information',
'timestamp': '2014-09-29T10:11:59.216159',
'username': u'charles',
'data': {
'facebook_page_url': u'',
'facebook_personal_url': u'',
'name': u'Tessa Jowell',
'honorific_suffix': u'DBE',
'party_ppc_page_url': u'',
'gender': u'female',
'image': None,
'identifiers': [
{
u'scheme': u'yournextmp-candidate',
u'id': u'544e3df981b7fa64bfccdaac',
u'identifier': u'2009'
},
{
u'scheme': u'uk.org.publicwhip',
u'id': u'54d2d3725b6aac303dfcd68b',
u'identifier': u'uk.org.publicwhip/person/10326'
}
],
'linkedin_url': u'',
'proxy_image': None,
'id': u'2009',
'other_names': [],
'honorific_prefix': u'Ms',
'standing_in': {
u'2015': {
'post_id': u'65808',
'name': u'Dulwich and West Norwood',
'mapit_url': 'http://mapit.mysociety.org/area/65808'
},
u'2010': {
u'post_id': u'65808',
u'name': u'Dulwich and West Norwood',
u'mapit_url': u'http://mapit.mysociety.org/area/65808'
}
},
'homepage_url': u'',
'twitter_username': u'',
'wikipedia_url': u'http://en.wikipedia.org/wiki/Tessa_Jowell',
'party_memberships': {
u'2015': {
'name': u'Liberal Democrats',
'id': u'party:90'
},
u'2010': {
u'id': u'party:53',
u'name': u'Labour Party'
}
},
'birth_date': None,
'email': u'[email protected]'
},
'version_id': '5aa6418325c1a0bb'
},
{
u'username': u'symroe',
u'information_source': u'Just adding example data',
u'timestamp': u'2014-10-28T14:32:36.835429',
u'version_id': u'35ec2d5821176ccc',
u'ip': u'127.0.0.1',
u'data': {
u'name': u'Tessa Jowell',
u'email': u'[email protected]',
u'twitter_username': u'',
u'standing_in': {
u'2015': {
u'post_id': u'65808',
u'name': u'Dulwich and West Norwood',
u'mapit_url': u'http://mapit.mysociety.org/area/65808'
},
u'2010': {
u'post_id': u'65808',
u'name': u'Dulwich and West Norwood',
u'mapit_url': u'http://mapit.mysociety.org/area/65808'
}
},
u'homepage_url': u'',
u'wikipedia_url': u'',
u'party_memberships': {
u'2015': {
u'id': u'party:53',
u'name': u'Labour Party'
},
u'2010': {
u'id': u'party:53',
u'name': u'Labour Party'
}
},
u'birth_date': None,
u'id': u'2009'
}
},
{
"data": {
"birth_date": None,
"email": "[email protected]",
"homepage_url": "",
"id": "2009",
"name": "Tessa Jowell",
"party_memberships": {
"2010": {
"id": "party:53",
"name": "Labour Party"
}
},
"standing_in": {
"2010": {
"mapit_url": "http://mapit.mysociety.org/area/65808",
"name": "Dulwich and West Norwood",
"post_id": "65808"
}
},
"twitter_username": "",
"wikipedia_url": ""
},
"information_source": "An initial version",
"ip": "127.0.0.1",
"timestamp": "2014-10-01T15:12:34.732426",
"username": "mark",
"version_id": "5469de7db0cbd155"
}
],
'birth_date': None,
u'party_memberships': None,
u'id': u'2009',
u'email': u'[email protected]'
}
self.assertTrue(
equal_call_args(
(expected_purging_put,),
mocked_person_put.call_args_list[0][0]
),
"the purging PUT was called with unexpected values"
)
expected_actual_put = {
u'slug': u'tessa-jowell',
u'contact_details': [],
u'name': u'Tessa Jowell',
u'links': [
{
'note': 'wikipedia',
'url': u'http://en.wikipedia.org/wiki/Tessa_Jowell'
}
],
u'honorific_suffix': u'DBE',
u'url': u'http://candidates.127.0.0.1.xip.io:3000/api/v0.1/persons/2009',
u'gender': u'female',
u'identifiers': [
{
u'scheme': u'yournextmp-candidate',
u'id': u'544e3df981b7fa64bfccdaac',
u'identifier': u'2009'
},
{
u'scheme': u'uk.org.publicwhip',
u'id': u'54d2d3725b6aac303dfcd68b',
u'identifier': u'uk.org.publicwhip/person/10326'
}
],
u'other_names': [],
u'html_url': u'http://candidates.127.0.0.1.xip.io:3000/persons/2009',
u'standing_in': {
u'2015': {
'post_id': u'65808',
'name': u'Dulwich and West Norwood',
'mapit_url': 'http://mapit.mysociety.org/area/65808'
},
u'2010': {
u'post_id': u'65808',
u'name': u'Dulwich and West Norwood',
u'mapit_url': u'http://mapit.mysociety.org/area/65808'
}
},
u'honorific_prefix': u'Ms',
u'phone': u'02086931826',
u'versions': [
{
'information_source': u'Some source of this information',
'timestamp': '2014-09-29T10:11:59.216159',
'username': u'charles',
'data': {
'facebook_page_url': u'',
'facebook_personal_url': u'',
'name': u'Tessa Jowell',
'honorific_suffix': u'DBE',
'party_ppc_page_url': u'',
'gender': u'female',
'image': None,
'identifiers': [
{
u'scheme': u'yournextmp-candidate',
u'id': u'544e3df981b7fa64bfccdaac',
u'identifier': u'2009'
},
{
u'scheme': u'uk.org.publicwhip',
u'id': u'54d2d3725b6aac303dfcd68b',
u'identifier': u'uk.org.publicwhip/person/10326'
}
],
'linkedin_url': u'',
'proxy_image': None,
'id': u'2009',
'other_names': [],
'honorific_prefix': u'Ms',
'standing_in': {
u'2015': {
'post_id': u'65808',
'name': u'Dulwich and West Norwood',
'mapit_url': 'http://mapit.mysociety.org/area/65808'
},
u'2010': {
u'post_id': u'65808',
u'name': u'Dulwich and West Norwood',
u'mapit_url': u'http://mapit.mysociety.org/area/65808'
}
},
'homepage_url': u'',
'twitter_username': u'',
'wikipedia_url': u'http://en.wikipedia.org/wiki/Tessa_Jowell',
'party_memberships': {
u'2015': {
'name': u'Liberal Democrats',
'id': u'party:90'
},
u'2010': {
u'id': u'party:53',
u'name': u'Labour Party'
}
},
'birth_date': None,
'email': u'[email protected]'
},
'version_id': '5aa6418325c1a0bb'
},
{
u'username': u'symroe',
u'information_source': u'Just adding example data',
u'timestamp': u'2014-10-28T14:32:36.835429',
u'version_id': u'35ec2d5821176ccc',
u'ip': u'127.0.0.1',
u'data': {
u'name': u'Tessa Jowell',
u'email': u'[email protected]',
u'twitter_username': u'',
u'standing_in': {
u'2015': {
u'post_id': u'65808',
u'name': u'Dulwich and West Norwood',
u'mapit_url': u'http://mapit.mysociety.org/area/65808'
},
u'2010': {
u'post_id': u'65808',
u'name': u'Dulwich and West Norwood',
u'mapit_url': u'http://mapit.mysociety.org/area/65808'
}
},
u'homepage_url': u'',
u'wikipedia_url': u'',
u'party_memberships': {
u'2015': {
u'id': u'party:53',
u'name': u'Labour Party'
},
u'2010': {
u'id': u'party:53',
u'name': u'Labour Party'
}
},
u'birth_date': None,
u'id': u'2009'
}
},
{
"data": {
"birth_date": None,
"email": "[email protected]",
"homepage_url": "",
"id": "2009",
"name": "Tessa Jowell",
"party_memberships": {
"2010": {
"id": "party:53",
"name": "Labour Party"
}
},
"standing_in": {
"2010": {
"mapit_url": "http://mapit.mysociety.org/area/65808",
"name": "Dulwich and West Norwood",
"post_id": "65808"
}
},
"twitter_username": "",
"wikipedia_url": ""
},
"information_source": "An initial version",
"ip": "127.0.0.1",
"timestamp": "2014-10-01T15:12:34.732426",
"username": "mark",
"version_id": "5469de7db0cbd155"
}
],
'birth_date': None,
u'party_memberships': {
u'2015': {
'name': u'Liberal Democrats',
'id': u'party:90'
},
u'2010': {
u'id': u'party:53',
u'name': u'Labour Party'
}
},
u'id': u'2009',
u'email': u'[email protected]'
}
self.assertTrue(
equal_call_args(
(expected_actual_put,),
mocked_person_put.call_args_list[1][0]
),
"the actual PUT was called with unexpected values"
)
# It should redirect back to the same person's page:
split_location = urlsplit(submission_response.location)
self.assertEqual(
'/person/2009',
split_location.path
)
| agpl-3.0 |
brunogamacatao/portalsaladeaula | django/db/backends/sqlite3/introspection.py | 88 | 5864 | import re
from django.db.backends import BaseDatabaseIntrospection
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
import re
m = re.search(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', key)
if m:
return ('CharField', {'max_length': int(m.group(1))})
raise KeyError
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name FROM sqlite_master
WHERE type='table' AND NOT name='sqlite_sequence'
ORDER BY name""")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
return [(info['name'], info['type'], None, None, None, None,
info['null_ok']) for info in self._table_info(cursor, table_name)]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(')+1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchone()
if not result:
continue
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li+1:ri]
for other_index, other_desc in enumerate(other_table_results.split(',')):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
name = other_desc.split(' ', 1)[0].strip('"')
if name == column:
relations[field_index] = (other_index, table)
break
return relations
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
indexes = {}
for info in self._table_info(cursor, table_name):
indexes[info['name']] = {'primary_key': info['pk'] != 0,
'unique': False}
cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
# seq, name, unique
for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
if not unique:
continue
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
info = cursor.fetchall()
# Skip indexes across multiple fields
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name]['unique'] = True
return indexes
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, dflt_value, pk
return [{'name': field[1],
'type': field[2],
'null_ok': not field[3],
'pk': field[5] # undocumented
} for field in cursor.fetchall()]
| bsd-3-clause |
talumbau/blaze | blaze/io/sql/tests/testutils.py | 6 | 1040 | from __future__ import print_function, division, absolute_import
data = [
(4, "hello", 2.1),
(8, "world", 4.2),
(16, "!", 8.4),
]
def create_sqlite_table():
import sqlite3 as db
conn = db.connect(":memory:")
c = conn.cursor()
c.execute('''create table testtable
(i INTEGER, msg text, price real)''')
c.executemany("""insert into testtable
values (?, ?, ?)""", data)
conn.commit()
c.close()
return conn
#def create_sqlite_table():
# import pyodbc as db
# conn = db.connect("Driver=SQLite ODBC Driver "
# "NameDatabase=Database8;LongNames=0;Timeout=1000;"
# "NoTXN=0;SyncPragma=NORMAL;StepAPI=0;")
# #conn = db.connect("Data Source=:memory:;Version=3;New=True;")
# c = conn.cursor()
# c.execute('''create table testtable
# (i INTEGER, msg text, price real)''')
# c.executemany("""insert into testtable
# values (?, ?, ?)""", data)
# conn.commit()
# c.close()
#
# return conn
| bsd-3-clause |
276361270/sqlalchemy | test/orm/test_dynamic.py | 25 | 29418 | from sqlalchemy import testing, desc, select, func, exc, cast, Integer
from sqlalchemy.orm import (
mapper, relationship, create_session, Query, attributes, exc as orm_exc,
Session, backref, configure_mappers)
from sqlalchemy.orm.dynamic import AppenderMixin
from sqlalchemy.testing import (
AssertsCompiledSQL, assert_raises_message, assert_raises, eq_, is_)
from test.orm import _fixtures
from sqlalchemy.testing.assertsql import CompiledSQL
class _DynamicFixture(object):
def _user_address_fixture(self, addresses_args={}):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(
User, users, properties={
'addresses': relationship(
Address, lazy="dynamic", **addresses_args)})
mapper(Address, addresses)
return User, Address
def _order_item_fixture(self, items_args={}):
items, Order, orders, order_items, Item = (self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item)
mapper(
Order, orders, properties={
'items': relationship(
Item, secondary=order_items, lazy="dynamic",
**items_args)})
mapper(Item, items)
return Order, Item
class DynamicTest(_DynamicFixture, _fixtures.FixtureTest, AssertsCompiledSQL):
def test_basic(self):
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
eq_([User(id=7,
addresses=[Address(id=1, email_address='[email protected]')])],
q.filter(User.id == 7).all())
eq_(self.static.user_address_result, q.all())
def test_statement(self):
"""test that the .statement accessor returns the actual statement that
would render, without any _clones called."""
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
u = q.filter(User.id == 7).first()
self.assert_compile(
u.addresses.statement,
"SELECT addresses.id, addresses.user_id, addresses.email_address "
"FROM "
"addresses WHERE :param_1 = addresses.user_id",
use_default_dialect=True
)
def test_detached_raise(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).get(8)
sess.expunge(u)
assert_raises(
orm_exc.DetachedInstanceError,
u.addresses.filter_by,
email_address='e'
)
def test_no_uselist_false(self):
User, Address = self._user_address_fixture(
addresses_args={"uselist": False})
assert_raises_message(
exc.InvalidRequestError,
"On relationship User.addresses, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_no_m2o(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(
Address, addresses, properties={
'user': relationship(User, lazy='dynamic')})
mapper(User, users)
assert_raises_message(
exc.InvalidRequestError,
"On relationship Address.user, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_order_by(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses.order_by(desc(Address.email_address))),
[
Address(email_address='[email protected]'),
Address(email_address='[email protected]'),
Address(email_address='[email protected]')
]
)
def test_configured_order_by(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={"order_by": addresses.c.email_address.desc()})
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses),
[
Address(email_address='[email protected]'),
Address(email_address='[email protected]'),
Address(email_address='[email protected]')
]
)
# test cancellation of None, replacement with something else
eq_(
list(u.addresses.order_by(None).order_by(Address.email_address)),
[
Address(email_address='[email protected]'),
Address(email_address='[email protected]'),
Address(email_address='[email protected]')
]
)
# test cancellation of None, replacement with nothing
eq_(
set(u.addresses.order_by(None)),
set([
Address(email_address='[email protected]'),
Address(email_address='[email protected]'),
Address(email_address='[email protected]')
])
)
def test_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).first()
eq_(u.addresses.count(), 1)
def test_dynamic_on_backref(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties={
'user': relationship(User,
backref=backref('addresses', lazy='dynamic'))
})
mapper(User, users)
sess = create_session()
ad = sess.query(Address).get(1)
def go():
ad.user = None
self.assert_sql_count(testing.db, go, 0)
sess.flush()
u = sess.query(User).get(7)
assert ad not in u.addresses
def test_no_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
# dynamic collection cannot implement __len__() (at least one that
# returns a live database result), else additional count() queries are
# issued when evaluating in a list context
def go():
eq_(
q.filter(User.id == 7).all(),
[
User(
id=7, addresses=[
Address(id=1, email_address='[email protected]')])])
self.assert_sql_count(testing.db, go, 2)
def test_no_populate(self):
User, Address = self._user_address_fixture()
u1 = User()
assert_raises_message(
NotImplementedError,
"Dynamic attributes don't support collection population.",
attributes.set_committed_value, u1, 'addresses', []
)
def test_m2m(self):
Order, Item = self._order_item_fixture(
items_args={"backref": backref("orders", lazy="dynamic")})
sess = create_session()
o1 = Order(id=15, description="order 10")
i1 = Item(id=10, description="item 8")
o1.items.append(i1)
sess.add(o1)
sess.flush()
assert o1 in i1.orders.all()
assert i1 in o1.items.all()
@testing.exclude(
'mysql', 'between', ((5, 1, 49), (5, 1, 52)),
'https://bugs.launchpad.net/ubuntu/+source/mysql-5.1/+bug/706988')
def test_association_nonaliased(self):
items, Order, orders, order_items, Item = (self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item)
mapper(Order, orders, properties={
'items': relationship(Item,
secondary=order_items,
lazy="dynamic",
order_by=order_items.c.item_id)
})
mapper(Item, items)
sess = create_session()
o = sess.query(Order).first()
self.assert_compile(
o.items,
"SELECT items.id AS items_id, items.description AS "
"items_description FROM items,"
" order_items WHERE :param_1 = order_items.order_id AND "
"items.id = order_items.item_id"
" ORDER BY order_items.item_id",
use_default_dialect=True
)
# filter criterion against the secondary table
# works
eq_(
o.items.filter(order_items.c.item_id == 2).all(),
[Item(id=2)]
)
def test_transient_count(self):
User, Address = self._user_address_fixture()
u1 = User()
u1.addresses.append(Address())
eq_(u1.addresses.count(), 1)
def test_transient_access(self):
User, Address = self._user_address_fixture()
u1 = User()
u1.addresses.append(Address())
eq_(u1.addresses[0], Address())
def test_custom_query(self):
class MyQuery(Query):
pass
User, Address = self._user_address_fixture(
addresses_args={"query_class": MyQuery})
sess = create_session()
u = User()
sess.add(u)
col = u.addresses
assert isinstance(col, Query)
assert isinstance(col, MyQuery)
assert hasattr(col, 'append')
eq_(type(col).__name__, 'AppenderMyQuery')
q = col.limit(1)
assert isinstance(q, Query)
assert isinstance(q, MyQuery)
assert not hasattr(q, 'append')
eq_(type(q).__name__, 'MyQuery')
def test_custom_query_with_custom_mixin(self):
class MyAppenderMixin(AppenderMixin):
def add(self, items):
if isinstance(items, list):
for item in items:
self.append(item)
else:
self.append(items)
class MyQuery(Query):
pass
class MyAppenderQuery(MyAppenderMixin, MyQuery):
query_class = MyQuery
User, Address = self._user_address_fixture(
addresses_args={"query_class": MyAppenderQuery})
sess = create_session()
u = User()
sess.add(u)
col = u.addresses
assert isinstance(col, Query)
assert isinstance(col, MyQuery)
assert hasattr(col, 'append')
assert hasattr(col, 'add')
eq_(type(col).__name__, 'MyAppenderQuery')
q = col.limit(1)
assert isinstance(q, Query)
assert isinstance(q, MyQuery)
assert not hasattr(q, 'append')
assert not hasattr(q, 'add')
eq_(type(q).__name__, 'MyQuery')
class UOWTest(
_DynamicFixture, _fixtures.FixtureTest,
testing.AssertsExecutionResults):
run_inserts = None
def test_persistence(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture()
sess = create_session()
u1 = User(name='jack')
a1 = Address(email_address='foo')
sess.add_all([u1, a1])
sess.flush()
eq_(
testing.db.scalar(
select(
[func.count(cast(1, Integer))]).
where(addresses.c.user_id != None)),
0)
u1 = sess.query(User).get(u1.id)
u1.addresses.append(a1)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a1.id, u1.id, 'foo')]
)
u1.addresses.remove(a1)
sess.flush()
eq_(
testing.db.scalar(
select(
[func.count(cast(1, Integer))]).
where(addresses.c.user_id != None)),
0
)
u1.addresses.append(a1)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a1.id, u1.id, 'foo')]
)
a2 = Address(email_address='bar')
u1.addresses.remove(a1)
u1.addresses.append(a2)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a2.id, u1.id, 'bar')]
)
def test_merge(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={"order_by": addresses.c.email_address})
sess = create_session()
u1 = User(name='jack')
a1 = Address(email_address='a1')
a2 = Address(email_address='a2')
a3 = Address(email_address='a3')
u1.addresses.append(a2)
u1.addresses.append(a3)
sess.add_all([u1, a1])
sess.flush()
u1 = User(id=u1.id, name='jack')
u1.addresses.append(a1)
u1.addresses.append(a3)
u1 = sess.merge(u1)
eq_(attributes.get_history(u1, 'addresses'), (
[a1],
[a3],
[a2]
))
sess.flush()
eq_(
list(u1.addresses),
[a1, a3]
)
def test_hasattr(self):
User, Address = self._user_address_fixture()
u1 = User(name='jack')
assert 'addresses' not in u1.__dict__
u1.addresses = [Address(email_address='test')]
assert 'addresses' in u1.__dict__
def test_collection_set(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={"order_by": addresses.c.email_address})
sess = create_session(autoflush=True, autocommit=False)
u1 = User(name='jack')
a1 = Address(email_address='a1')
a2 = Address(email_address='a2')
a3 = Address(email_address='a3')
a4 = Address(email_address='a4')
sess.add(u1)
u1.addresses = [a1, a3]
eq_(list(u1.addresses), [a1, a3])
u1.addresses = [a1, a2, a4]
eq_(list(u1.addresses), [a1, a2, a4])
u1.addresses = [a2, a3]
eq_(list(u1.addresses), [a2, a3])
u1.addresses = []
eq_(list(u1.addresses), [])
def test_noload_append(self):
# test that a load of User.addresses is not emitted
# when flushing an append
User, Address = self._user_address_fixture()
sess = Session()
u1 = User(name="jack", addresses=[Address(email_address="a1")])
sess.add(u1)
sess.commit()
u1_id = u1.id
sess.expire_all()
u1.addresses.append(Address(email_address='a2'))
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: [{"param_1": u1_id}]),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: [{'email_address': 'a2', 'user_id': u1_id}]
)
)
def test_noload_remove(self):
# test that a load of User.addresses is not emitted
# when flushing a remove
User, Address = self._user_address_fixture()
sess = Session()
u1 = User(name="jack", addresses=[Address(email_address="a1")])
a2 = Address(email_address='a2')
u1.addresses.append(a2)
sess.add(u1)
sess.commit()
u1_id = u1.id
a2_id = a2.id
sess.expire_all()
u1.addresses.remove(a2)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.email_address "
"AS addresses_email_address FROM addresses "
"WHERE addresses.id = :param_1",
lambda ctx: [{'param_1': a2_id}]
),
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE addresses.id = "
":addresses_id",
lambda ctx: [{'addresses_id': a2_id, 'user_id': None}]
),
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: [{"param_1": u1_id}]),
)
def test_rollback(self):
User, Address = self._user_address_fixture()
sess = create_session(
expire_on_commit=False, autocommit=False, autoflush=True)
u1 = User(name='jack')
u1.addresses.append(Address(email_address='[email protected]'))
sess.add(u1)
sess.flush()
sess.commit()
u1.addresses.append(Address(email_address='[email protected]'))
eq_(
u1.addresses.order_by(Address.id).all(),
[
Address(email_address='[email protected]'),
Address(email_address='[email protected]')
]
)
sess.rollback()
eq_(
u1.addresses.all(),
[Address(email_address='[email protected]')]
)
def _test_delete_cascade(self, expected):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by": addresses.c.id,
"backref": "user",
"cascade": "save-update" if expected else "all, delete"})
sess = create_session(autoflush=True, autocommit=False)
u = User(name='ed')
u.addresses.extend(
[Address(email_address=letter) for letter in 'abcdef']
)
sess.add(u)
sess.commit()
eq_(testing.db.scalar(addresses.count(addresses.c.user_id == None)), 0)
eq_(testing.db.scalar(addresses.count(addresses.c.user_id != None)), 6)
sess.delete(u)
sess.commit()
if expected:
eq_(
testing.db.scalar(
addresses.count(addresses.c.user_id == None)), 6)
eq_(
testing.db.scalar(
addresses.count(addresses.c.user_id != None)), 0)
else:
eq_(testing.db.scalar(addresses.count()), 0)
def test_delete_nocascade(self):
self._test_delete_cascade(True)
def test_delete_cascade(self):
self._test_delete_cascade(False)
def test_self_referential(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(
Node, nodes, properties={
'children': relationship(
Node, lazy="dynamic", order_by=nodes.c.id)})
sess = Session()
n2, n3 = Node(), Node()
n1 = Node(children=[n2, n3])
sess.add(n1)
sess.commit()
eq_(n1.children.all(), [n2, n3])
def test_remove_orphans(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by": addresses.c.id,
"backref": "user",
"cascade": "all, delete-orphan"})
sess = create_session(autoflush=True, autocommit=False)
u = User(name='ed')
u.addresses.extend(
[Address(email_address=letter) for letter in 'abcdef']
)
sess.add(u)
for a in u.addresses.filter(
Address.email_address.in_(['c', 'e', 'f'])):
u.addresses.remove(a)
eq_(
set(ad for ad, in sess.query(Address.email_address)),
set(['a', 'b', 'd'])
)
def _backref_test(self, autoflush, saveuser):
User, Address = self._user_address_fixture(
addresses_args={"backref": "user"})
sess = create_session(autoflush=autoflush, autocommit=False)
u = User(name='buffy')
a = Address(email_address='[email protected]')
a.user = u
if saveuser:
sess.add(u)
else:
sess.add(a)
if not autoflush:
sess.flush()
assert u in sess
assert a in sess
eq_(list(u.addresses), [a])
a.user = None
if not autoflush:
eq_(list(u.addresses), [a])
if not autoflush:
sess.flush()
eq_(list(u.addresses), [])
def test_backref_autoflush_saveuser(self):
self._backref_test(True, True)
def test_backref_autoflush_savead(self):
self._backref_test(True, False)
def test_backref_saveuser(self):
self._backref_test(False, True)
def test_backref_savead(self):
self._backref_test(False, False)
def test_backref_events(self):
User, Address = self._user_address_fixture(
addresses_args={"backref": "user"})
u1 = User()
a1 = Address()
u1.addresses.append(a1)
is_(a1.user, u1)
def test_no_deref(self):
User, Address = self._user_address_fixture(
addresses_args={"backref": "user", })
session = create_session()
user = User()
user.name = 'joe'
user.fullname = 'Joe User'
user.password = 'Joe\'s secret'
address = Address()
address.email_address = '[email protected]'
address.user = user
session.add(user)
session.flush()
session.expunge_all()
def query1():
session = create_session(testing.db)
user = session.query(User).first()
return user.addresses.all()
def query2():
session = create_session(testing.db)
return session.query(User).first().addresses.all()
def query3():
session = create_session(testing.db)
return session.query(User).first().addresses.all()
eq_(query1(), [Address(email_address='[email protected]')])
eq_(query2(), [Address(email_address='[email protected]')])
eq_(query3(), [Address(email_address='[email protected]')])
class HistoryTest(_DynamicFixture, _fixtures.FixtureTest):
run_inserts = None
def _transient_fixture(self, addresses_args={}):
User, Address = self._user_address_fixture(
addresses_args=addresses_args)
u1 = User()
a1 = Address()
return u1, a1
def _persistent_fixture(self, autoflush=True, addresses_args={}):
User, Address = self._user_address_fixture(
addresses_args=addresses_args)
u1 = User(name='u1')
a1 = Address(email_address='a1')
s = Session(autoflush=autoflush)
s.add(u1)
s.flush()
return u1, a1, s
def _persistent_m2m_fixture(self, autoflush=True, items_args={}):
Order, Item = self._order_item_fixture(items_args=items_args)
o1 = Order()
i1 = Item(description="i1")
s = Session(autoflush=autoflush)
s.add(o1)
s.flush()
return o1, i1, s
def _assert_history(self, obj, compare, compare_passive=None):
if isinstance(obj, self.classes.User):
attrname = "addresses"
elif isinstance(obj, self.classes.Order):
attrname = "items"
eq_(
attributes.get_history(obj, attrname),
compare
)
if compare_passive is None:
compare_passive = compare
eq_(
attributes.get_history(obj, attrname,
attributes.LOAD_AGAINST_COMMITTED),
compare_passive
)
def test_append_transient(self):
u1, a1 = self._transient_fixture()
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], [])
)
def test_append_persistent(self):
u1, a1, s = self._persistent_fixture()
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], [])
)
def test_remove_transient(self):
u1, a1 = self._transient_fixture()
u1.addresses.append(a1)
u1.addresses.remove(a1)
self._assert_history(u1,
([], [], [])
)
def test_backref_pop_transient(self):
u1, a1 = self._transient_fixture(addresses_args={"backref": "user"})
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], []),
)
a1.user = None
# removed from added
self._assert_history(u1,
([], [], []),
)
def test_remove_persistent(self):
u1, a1, s = self._persistent_fixture()
u1.addresses.append(a1)
s.flush()
s.expire_all()
u1.addresses.remove(a1)
self._assert_history(u1,
([], [], [a1])
)
def test_backref_pop_persistent_autoflush_o2m_active_hist(self):
u1, a1, s = self._persistent_fixture(
addresses_args={"backref": backref("user", active_history=True)})
u1.addresses.append(a1)
s.flush()
s.expire_all()
a1.user = None
self._assert_history(u1,
([], [], [a1]),
)
def test_backref_pop_persistent_autoflush_m2m(self):
o1, i1, s = self._persistent_m2m_fixture(
items_args={"backref": "orders"})
o1.items.append(i1)
s.flush()
s.expire_all()
i1.orders.remove(o1)
self._assert_history(o1,
([], [], [i1]),
)
def test_backref_pop_persistent_noflush_m2m(self):
o1, i1, s = self._persistent_m2m_fixture(
items_args={"backref": "orders"}, autoflush=False)
o1.items.append(i1)
s.flush()
s.expire_all()
i1.orders.remove(o1)
self._assert_history(o1,
([], [], [i1]),
)
def test_unchanged_persistent(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture()
a2, a3 = Address(email_address='a2'), Address(email_address='a3')
u1.addresses.append(a1)
u1.addresses.append(a2)
s.flush()
u1.addresses.append(a3)
u1.addresses.remove(a2)
self._assert_history(u1,
([a3], [a1], [a2]),
compare_passive=([a3], [], [a2])
)
def test_replace_transient(self):
Address = self.classes.Address
u1, a1 = self._transient_fixture()
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a2, a3, a4, a5], [], [])
)
def test_replace_persistent_noflush(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture(autoflush=False)
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a2, a3, a4, a5], [], [])
)
def test_replace_persistent_autoflush(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture(autoflush=True)
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a3, a4, a5], [a2], [a1]),
compare_passive=([a3, a4, a5], [], [a1])
)
def test_persistent_but_readded_noflush(self):
u1, a1, s = self._persistent_fixture(autoflush=False)
u1.addresses.append(a1)
s.flush()
u1.addresses.append(a1)
self._assert_history(u1,
([], [a1], []),
compare_passive=([a1], [], [])
)
def test_persistent_but_readded_autoflush(self):
u1, a1, s = self._persistent_fixture(autoflush=True)
u1.addresses.append(a1)
s.flush()
u1.addresses.append(a1)
self._assert_history(u1,
([], [a1], []),
compare_passive=([a1], [], [])
)
def test_missing_but_removed_noflush(self):
u1, a1, s = self._persistent_fixture(autoflush=False)
u1.addresses.remove(a1)
self._assert_history(u1, ([], [], []), compare_passive=([], [], [a1]))
| mit |
einstein95/crunchy-xml-decoder | crunchy-xml-decoder/unidecode/x0b8.py | 253 | 4714 | data = (
'reoss', # 0x00
'reong', # 0x01
'reoj', # 0x02
'reoc', # 0x03
'reok', # 0x04
'reot', # 0x05
'reop', # 0x06
'reoh', # 0x07
're', # 0x08
'reg', # 0x09
'regg', # 0x0a
'regs', # 0x0b
'ren', # 0x0c
'renj', # 0x0d
'renh', # 0x0e
'red', # 0x0f
'rel', # 0x10
'relg', # 0x11
'relm', # 0x12
'relb', # 0x13
'rels', # 0x14
'relt', # 0x15
'relp', # 0x16
'relh', # 0x17
'rem', # 0x18
'reb', # 0x19
'rebs', # 0x1a
'res', # 0x1b
'ress', # 0x1c
'reng', # 0x1d
'rej', # 0x1e
'rec', # 0x1f
'rek', # 0x20
'ret', # 0x21
'rep', # 0x22
'reh', # 0x23
'ryeo', # 0x24
'ryeog', # 0x25
'ryeogg', # 0x26
'ryeogs', # 0x27
'ryeon', # 0x28
'ryeonj', # 0x29
'ryeonh', # 0x2a
'ryeod', # 0x2b
'ryeol', # 0x2c
'ryeolg', # 0x2d
'ryeolm', # 0x2e
'ryeolb', # 0x2f
'ryeols', # 0x30
'ryeolt', # 0x31
'ryeolp', # 0x32
'ryeolh', # 0x33
'ryeom', # 0x34
'ryeob', # 0x35
'ryeobs', # 0x36
'ryeos', # 0x37
'ryeoss', # 0x38
'ryeong', # 0x39
'ryeoj', # 0x3a
'ryeoc', # 0x3b
'ryeok', # 0x3c
'ryeot', # 0x3d
'ryeop', # 0x3e
'ryeoh', # 0x3f
'rye', # 0x40
'ryeg', # 0x41
'ryegg', # 0x42
'ryegs', # 0x43
'ryen', # 0x44
'ryenj', # 0x45
'ryenh', # 0x46
'ryed', # 0x47
'ryel', # 0x48
'ryelg', # 0x49
'ryelm', # 0x4a
'ryelb', # 0x4b
'ryels', # 0x4c
'ryelt', # 0x4d
'ryelp', # 0x4e
'ryelh', # 0x4f
'ryem', # 0x50
'ryeb', # 0x51
'ryebs', # 0x52
'ryes', # 0x53
'ryess', # 0x54
'ryeng', # 0x55
'ryej', # 0x56
'ryec', # 0x57
'ryek', # 0x58
'ryet', # 0x59
'ryep', # 0x5a
'ryeh', # 0x5b
'ro', # 0x5c
'rog', # 0x5d
'rogg', # 0x5e
'rogs', # 0x5f
'ron', # 0x60
'ronj', # 0x61
'ronh', # 0x62
'rod', # 0x63
'rol', # 0x64
'rolg', # 0x65
'rolm', # 0x66
'rolb', # 0x67
'rols', # 0x68
'rolt', # 0x69
'rolp', # 0x6a
'rolh', # 0x6b
'rom', # 0x6c
'rob', # 0x6d
'robs', # 0x6e
'ros', # 0x6f
'ross', # 0x70
'rong', # 0x71
'roj', # 0x72
'roc', # 0x73
'rok', # 0x74
'rot', # 0x75
'rop', # 0x76
'roh', # 0x77
'rwa', # 0x78
'rwag', # 0x79
'rwagg', # 0x7a
'rwags', # 0x7b
'rwan', # 0x7c
'rwanj', # 0x7d
'rwanh', # 0x7e
'rwad', # 0x7f
'rwal', # 0x80
'rwalg', # 0x81
'rwalm', # 0x82
'rwalb', # 0x83
'rwals', # 0x84
'rwalt', # 0x85
'rwalp', # 0x86
'rwalh', # 0x87
'rwam', # 0x88
'rwab', # 0x89
'rwabs', # 0x8a
'rwas', # 0x8b
'rwass', # 0x8c
'rwang', # 0x8d
'rwaj', # 0x8e
'rwac', # 0x8f
'rwak', # 0x90
'rwat', # 0x91
'rwap', # 0x92
'rwah', # 0x93
'rwae', # 0x94
'rwaeg', # 0x95
'rwaegg', # 0x96
'rwaegs', # 0x97
'rwaen', # 0x98
'rwaenj', # 0x99
'rwaenh', # 0x9a
'rwaed', # 0x9b
'rwael', # 0x9c
'rwaelg', # 0x9d
'rwaelm', # 0x9e
'rwaelb', # 0x9f
'rwaels', # 0xa0
'rwaelt', # 0xa1
'rwaelp', # 0xa2
'rwaelh', # 0xa3
'rwaem', # 0xa4
'rwaeb', # 0xa5
'rwaebs', # 0xa6
'rwaes', # 0xa7
'rwaess', # 0xa8
'rwaeng', # 0xa9
'rwaej', # 0xaa
'rwaec', # 0xab
'rwaek', # 0xac
'rwaet', # 0xad
'rwaep', # 0xae
'rwaeh', # 0xaf
'roe', # 0xb0
'roeg', # 0xb1
'roegg', # 0xb2
'roegs', # 0xb3
'roen', # 0xb4
'roenj', # 0xb5
'roenh', # 0xb6
'roed', # 0xb7
'roel', # 0xb8
'roelg', # 0xb9
'roelm', # 0xba
'roelb', # 0xbb
'roels', # 0xbc
'roelt', # 0xbd
'roelp', # 0xbe
'roelh', # 0xbf
'roem', # 0xc0
'roeb', # 0xc1
'roebs', # 0xc2
'roes', # 0xc3
'roess', # 0xc4
'roeng', # 0xc5
'roej', # 0xc6
'roec', # 0xc7
'roek', # 0xc8
'roet', # 0xc9
'roep', # 0xca
'roeh', # 0xcb
'ryo', # 0xcc
'ryog', # 0xcd
'ryogg', # 0xce
'ryogs', # 0xcf
'ryon', # 0xd0
'ryonj', # 0xd1
'ryonh', # 0xd2
'ryod', # 0xd3
'ryol', # 0xd4
'ryolg', # 0xd5
'ryolm', # 0xd6
'ryolb', # 0xd7
'ryols', # 0xd8
'ryolt', # 0xd9
'ryolp', # 0xda
'ryolh', # 0xdb
'ryom', # 0xdc
'ryob', # 0xdd
'ryobs', # 0xde
'ryos', # 0xdf
'ryoss', # 0xe0
'ryong', # 0xe1
'ryoj', # 0xe2
'ryoc', # 0xe3
'ryok', # 0xe4
'ryot', # 0xe5
'ryop', # 0xe6
'ryoh', # 0xe7
'ru', # 0xe8
'rug', # 0xe9
'rugg', # 0xea
'rugs', # 0xeb
'run', # 0xec
'runj', # 0xed
'runh', # 0xee
'rud', # 0xef
'rul', # 0xf0
'rulg', # 0xf1
'rulm', # 0xf2
'rulb', # 0xf3
'ruls', # 0xf4
'rult', # 0xf5
'rulp', # 0xf6
'rulh', # 0xf7
'rum', # 0xf8
'rub', # 0xf9
'rubs', # 0xfa
'rus', # 0xfb
'russ', # 0xfc
'rung', # 0xfd
'ruj', # 0xfe
'ruc', # 0xff
)
| gpl-2.0 |
nnethercote/servo | components/script/dom/bindings/codegen/parser/tests/test_variadic_constraints.py | 170 | 1564 | def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface VariadicConstraints1 {
void foo(byte... arg1, byte arg2);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown on variadic argument followed by required "
"argument.")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface VariadicConstraints2 {
void foo(byte... arg1, optional byte arg2);
};
""")
        results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown on variadic argument followed by optional "
"argument.")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface VariadicConstraints3 {
void foo(optional byte... arg1);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown on variadic argument explicitly flagged as "
"optional.")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface VariadicConstraints4 {
void foo(byte... arg1 = 0);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown on variadic argument with default value.")
| mpl-2.0 |
tvalacarta/tvalacarta | python/main-classic/channels/dwspan.py | 1 | 8418 | # -*- coding: utf-8 -*-
#------------------------------------------------------------------
# tvalacarta
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------------
# Channel for "Deutsche Welle en español", created by rsantaella
#------------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
from core import jsontools
CHANNELNAME = "dwspan"
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("tvalacarta.channels.dwspan mainlist")
return programas(Item())
def directos(item=None):
logger.info("tvalacarta.channels.aragontv directos")
itemlist = []
itemlist.append( Item(channel=CHANNELNAME, title="DW (Español)", url="http://dwstream3-lh.akamaihd.net/i/dwstream3_live@124409/master.m3u8", thumbnail="http://media.tvalacarta.info/canales/128x128/dwspan.png", category="Nacionales", action="play", folder=False ) )
return itemlist
def programas(item):
logger.info("tvalacarta.channels.dwspan programas")
itemlist = []
if item.url=="":
item.url = "http://www.dw.com/es/tv/emisiones-tv/s-9134"
'''
<div class="col1 mc">
<div class="news epg">
<div class="teaserImg">
<a href="/es/tv/claves/s-30468">
<img width="220"
height="124" border="0"
src="/image/15682255_301.jpg"/>
</a>
</div>
<div class="teaserContentWrap">
<a href="/es/tv/claves/s-30468">
<h2>Claves</h2>
<p>El mundo desde América Latina</p>
</a>
</div>
<ul class="smallList">
<li>
<strong>Actual</strong>
<a href="/es/claves-chile-educación-sexual-inhibida/av-36127035">Claves</a>
24.10.2016<span class="icon tv"></span> </li>
<li>
<a href="/es/multimedia/todos-los-contenidos/s-100838?type=18&programs=15605312">Todos los videos</a>
<div class="col1 mc">
<div class="news epg">
<div class="teaserImg">
<a href="/es/tv/life-links/s-101481">
<img width="220"
height="124" border="0"
src="/image/17923250_301.png"/>
</a>
</div>
<div class="teaserContentWrap">
<a href="/es/tv/life-links/s-101481">
<h2>Life Links</h2>
<p>Compartir realidades. Cambiar perspectivas.</p>
</a>
</div>
<ul class="smallList">
<li>
<strong>Actual</strong>
<a href="/es/life-links-headabovewater-con-el-agua-al-cuello-trabajar-en-un-barco/av-35880794">Life Links</a>
24.09.2016<span class="icon tv"></span> </li>
<li>
<a href="/es/multimedia/todos-los-contenidos/s-100838?type=18&programs=18365568">Todos los videos</a>
</li>
</ul>
</div>
</div>
'''
    # Download the page
data = scrapertools.cache_page(item.url)
#logger.info(data)
pattern = '<div class="col1 mc[^<]+'
pattern += '<div class="news epg[^<]+'
pattern += '<div class="teaserImg[^<]+'
pattern += '<a href="([^"]+)">[^<]+'
pattern += '<img\s+width="\d+"\s+height="\d+"\s+border="\d+"\s+src="([^"]+)"[^<]+'
pattern += '</a[^<]+'
pattern += '</div[^<]+'
pattern += '<div class="teaserContentWrap"[^<]+'
pattern += '<a[^<]+'
pattern += '<h2>([^<]+)</h2>[^<]+'
pattern += '<p>([^<]+)</p'
matches = re.compile(pattern,re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
title = scrapedtitle
thumbnail = urlparse.urljoin( item.url , scrapedthumbnail )
url = urlparse.urljoin( item.url , scrapedurl.strip() )
plot = scrapedplot
# Appends a new item to the xbmc item list
itemlist.append( Item(channel=CHANNELNAME, title=title , action="episodios" , url=url, thumbnail=thumbnail, fanart=thumbnail, plot=plot , show = title, view="programs", folder=True) )
return itemlist
def detalle_programa(item):
logger.info("tvalacarta.channels.dwspan detalle_programa")
try:
data = scrapertools.cache_page(item.url)
item.plot = scrapertools.find_single_match(data,'<div class="col2 programClaimTeaser">.*?<p>(.*?)</div>')
item.plot = scrapertools.htmlclean( item.plot ).strip()
except:
import traceback
logger.info(traceback.format_exc())
return item
def episodios(item):
logger.info("tvalacarta.channels.dwspan episodios")
itemlist = []
#
'''
<div class="col1">
<div class="news searchres hov">
<a href="/es/life-links-readytofight-listos-para-pelear/av-19224025">
<div class="teaserImg tv">
<img border="0" width="220" height="124" src="/image/18378218_301.jpg" title="Life Links - #readytofight: Listos para pelear" alt="default" /> </div>
<h2>Life Links - #readytofight: Listos para pelear
<span class="date">30.04.2016
| 26:06 Minutos
</span>
<span class='icon tv'></span> </h2>
<p>Un imán, un exsalafista, un ex marine de EE. UU. A todos ellos les une una meta: luchar contra el extremismo y “Estado Islámico”.</p>
</a>
</div>
</div>
'''
if "pagenumber=" in item.url:
data_url = item.url
else:
data = scrapertools.cache_page(item.url)
# http://www.dw.com/es/multimedia/todos-los-contenidos/s-100838?type=18&programs=15535663
# http://www.dw.com/mediafilter/research?lang=es&type=18&programs=15535663&sort=date&results=32&showteasers=true&pagenumber=1
program_id = scrapertools.find_single_match(data,'<a href="http://www.dw.com/es/multimedia/todos-los-contenidos/s-100838.type=18&programs=([^"]+)"')
data_url = "http://www.dw.com/mediafilter/research?lang=es&type=18&programs="+program_id+"&sort=date&results=32&showteasers=true&pagenumber=1"
data = scrapertools.cache_page(data_url)
pattern = '<div class="col1"[^<]+'
pattern += '<div class="news searchres hov"[^<]+'
pattern += '<a href="([^"]+)"[^<]+'
pattern += '<div class="teaserImg tv"[^<]+'
pattern += '<img.*?src="([^"]+)"[^<]+</div>[^<]+'
pattern += '<h2>([^<]+)'
pattern += '<span class="date">(\d+\.\d+\.\d+)\s+\|\s+(\d+\:\d+)[^<]+'
pattern += '</span>[^<]+'
pattern += '<span[^<]+</span[^<]+</h2[^<]+'
pattern += '<p>([^<]+)</p>'
matches = re.compile(pattern,re.DOTALL).findall(data)
logger.info( repr(matches) )
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapeddate, duration, scrapedplot in matches:
title = scrapedtitle.strip()
thumbnail = urlparse.urljoin( item.url , scrapedthumbnail )
url = urlparse.urljoin( item.url , scrapedurl.strip() )
plot = scrapedplot
aired_date = scrapertools.parse_date(scrapeddate)
# Appends a new item to the xbmc item list
itemlist.append( Item(channel=CHANNELNAME, title=title , action="play" , server="dwspan", url=url, thumbnail=thumbnail, fanart=thumbnail, plot=plot , aired_date=aired_date, duration=duration, show=item.show, view="videos", folder=False) )
if len(itemlist)>0:
current_page = scrapertools.find_single_match(data_url,"pagenumber=(\d+)")
logger.info("current_page="+current_page)
next_page = str(int(current_page)+1)
logger.info("next_page="+next_page)
next_page_url = data_url.replace("pagenumber="+current_page,"pagenumber="+next_page)
logger.info("next_page_url="+next_page_url)
itemlist.append(Item(channel=CHANNELNAME, title=">> Página siguiente" , action="episodios" , url=next_page_url, show=item.show) )
return itemlist
def detalle_episodio(item):
item.geolocked = "0"
try:
from servers import dwspan as servermodule
video_urls = servermodule.get_video_url(item.url)
item.media_url = video_urls[0][1]
except:
import traceback
print traceback.format_exc()
item.media_url = ""
return item
def play(item):
    item.server = "dwspan"
itemlist = [item]
return itemlist
# Automatic channel check: this function must return "True" if everything in the channel is OK.
def test():
items_programas = mainlist(Item())
for item_programa in items_programas:
items_episodios = episodios(item_programa)
if len(items_episodios)>0:
return True
return False
| gpl-3.0 |
zarboz/Monarudo_M7_port | arm-cortex_a15-linux-gnueabi/share/gdb/python/gdb/printing.py | 137 | 10191 | # Pretty-printer utilities.
# Copyright (C) 2010-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for working with pretty-printers."""
import gdb
import gdb.types
import re
import sys
if sys.version_info[0] > 2:
# Python 3 removed basestring and long
basestring = str
long = int
class PrettyPrinter(object):
"""A basic pretty-printer.
Attributes:
name: A unique string among all printers for the context in which
it is defined (objfile, progspace, or global(gdb)), and should
meaningfully describe what can be pretty-printed.
E.g., "StringPiece" or "protobufs".
subprinters: An iterable object with each element having a `name'
attribute, and, potentially, "enabled" attribute.
Or this is None if there are no subprinters.
enabled: A boolean indicating if the printer is enabled.
Subprinters are for situations where "one" pretty-printer is actually a
collection of several printers. E.g., The libstdc++ pretty-printer has
a pretty-printer for each of several different types, based on regexps.
"""
# While one might want to push subprinters into the subclass, it's
# present here to formalize such support to simplify
# commands/pretty_printers.py.
def __init__(self, name, subprinters=None):
self.name = name
self.subprinters = subprinters
self.enabled = True
def __call__(self, val):
# The subclass must define this.
raise NotImplementedError("PrettyPrinter __call__")
class SubPrettyPrinter(object):
"""Baseclass for sub-pretty-printers.
Sub-pretty-printers needn't use this, but it formalizes what's needed.
Attributes:
name: The name of the subprinter.
enabled: A boolean indicating if the subprinter is enabled.
"""
def __init__(self, name):
self.name = name
self.enabled = True
def register_pretty_printer(obj, printer, replace=False):
"""Register pretty-printer PRINTER with OBJ.
The printer is added to the front of the search list, thus one can override
an existing printer if one needs to. Use a different name when overriding
an existing printer, otherwise an exception will be raised; multiple
printers with the same name are disallowed.
Arguments:
obj: Either an objfile, progspace, or None (in which case the printer
is registered globally).
printer: Either a function of one argument (old way) or any object
which has attributes: name, enabled, __call__.
replace: If True replace any existing copy of the printer.
Otherwise if the printer already exists raise an exception.
Returns:
Nothing.
Raises:
TypeError: A problem with the type of the printer.
ValueError: The printer's name contains a semicolon ";".
RuntimeError: A printer with the same name is already registered.
If the caller wants the printer to be listable and disableable, it must
follow the PrettyPrinter API. This applies to the old way (functions) too.
If printer is an object, __call__ is a method of two arguments:
self, and the value to be pretty-printed. See PrettyPrinter.
"""
# Watch for both __name__ and name.
# Functions get the former for free, but we don't want to use an
# attribute named __foo__ for pretty-printers-as-objects.
# If printer has both, we use `name'.
if not hasattr(printer, "__name__") and not hasattr(printer, "name"):
raise TypeError("printer missing attribute: name")
if hasattr(printer, "name") and not hasattr(printer, "enabled"):
raise TypeError("printer missing attribute: enabled")
if not hasattr(printer, "__call__"):
raise TypeError("printer missing attribute: __call__")
if obj is None:
if gdb.parameter("verbose"):
gdb.write("Registering global %s pretty-printer ...\n" % name)
obj = gdb
else:
if gdb.parameter("verbose"):
gdb.write("Registering %s pretty-printer for %s ...\n" %
(printer.name, obj.filename))
if hasattr(printer, "name"):
if not isinstance(printer.name, basestring):
raise TypeError("printer name is not a string")
# If printer provides a name, make sure it doesn't contain ";".
# Semicolon is used by the info/enable/disable pretty-printer commands
# to delimit subprinters.
if printer.name.find(";") >= 0:
raise ValueError("semicolon ';' in printer name")
# Also make sure the name is unique.
# Alas, we can't do the same for functions and __name__, they could
# all have a canonical name like "lookup_function".
# PERF: gdb records printers in a list, making this inefficient.
i = 0
for p in obj.pretty_printers:
if hasattr(p, "name") and p.name == printer.name:
if replace:
del obj.pretty_printers[i]
break
else:
raise RuntimeError("pretty-printer already registered: %s" %
printer.name)
i = i + 1
obj.pretty_printers.insert(0, printer)
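# Illustrative sketch, not part of the original module: the simplest client of
# register_pretty_printer() is a plain lookup function (the "old way" noted in
# the docstring above). The type name "my_str" and its "text" member are
# hypothetical placeholders, not a real inferior type.
def _example_lookup_function(val):
    class _MyStrPrinter(object):
        def __init__(self, val):
            self.val = val
        def to_string(self):
            # Assumes the hypothetical struct has a string-like member 'text'.
            return self.val['text'].string()
    if gdb.types.get_basic_type(val.type).tag == "my_str":
        return _MyStrPrinter(val)
    return None
# A client could then register it globally with:
#   register_pretty_printer(None, _example_lookup_function)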
class RegexpCollectionPrettyPrinter(PrettyPrinter):
"""Class for implementing a collection of regular-expression based pretty-printers.
Intended usage:
pretty_printer = RegexpCollectionPrettyPrinter("my_library")
pretty_printer.add_printer("myclass1", "^myclass1$", MyClass1Printer)
...
pretty_printer.add_printer("myclassN", "^myclassN$", MyClassNPrinter)
register_pretty_printer(obj, pretty_printer)
"""
class RegexpSubprinter(SubPrettyPrinter):
def __init__(self, name, regexp, gen_printer):
super(RegexpCollectionPrettyPrinter.RegexpSubprinter, self).__init__(name)
self.regexp = regexp
self.gen_printer = gen_printer
self.compiled_re = re.compile(regexp)
def __init__(self, name):
super(RegexpCollectionPrettyPrinter, self).__init__(name, [])
def add_printer(self, name, regexp, gen_printer):
"""Add a printer to the list.
The printer is added to the end of the list.
Arguments:
name: The name of the subprinter.
regexp: The regular expression, as a string.
gen_printer: A function/method that given a value returns an
object to pretty-print it.
Returns:
Nothing.
"""
# NOTE: A previous version made the name of each printer the regexp.
# That makes it awkward to pass to the enable/disable commands (it's
# cumbersome to make a regexp of a regexp). So now the name is a
# separate parameter.
self.subprinters.append(self.RegexpSubprinter(name, regexp,
gen_printer))
def __call__(self, val):
"""Lookup the pretty-printer for the provided value."""
# Get the type name.
typename = gdb.types.get_basic_type(val.type).tag
if not typename:
return None
# Iterate over table of type regexps to determine
# if a printer is registered for that type.
# Return an instantiation of the printer if found.
for printer in self.subprinters:
if printer.enabled and printer.compiled_re.search(typename):
return printer.gen_printer(val)
# Cannot find a pretty printer. Return None.
return None
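# Illustrative sketch, not part of the original module: building and
# registering a RegexpCollectionPrettyPrinter. The library/type names
# ("my_library", "MyClass1") and the 'member' field are hypothetical.
def _example_build_regexp_printer():
    class _MyClass1Printer(object):
        def __init__(self, val):
            self.val = val
        def to_string(self):
            return "MyClass1(%s)" % self.val['member']
    pp = RegexpCollectionPrettyPrinter("my_library")
    pp.add_printer("MyClass1", "^MyClass1$", _MyClass1Printer)
    # None registers globally; an objfile or progspace may be passed instead.
    # replace=True avoids a RuntimeError if this is re-sourced in a session.
    register_pretty_printer(None, pp, replace=True)
    return pp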
# A helper class for printing enum types. This class is instantiated
# with a list of enumerators to print a particular Value.
class _EnumInstance:
def __init__(self, enumerators, val):
self.enumerators = enumerators
self.val = val
def to_string(self):
flag_list = []
v = long(self.val)
any_found = False
for (e_name, e_value) in self.enumerators:
if v & e_value != 0:
flag_list.append(e_name)
v = v & ~e_value
any_found = True
if not any_found or v != 0:
# Leftover value.
flag_list.append('<unknown: 0x%x>' % v)
return "0x%x [%s]" % (self.val, " | ".join(flag_list))
class FlagEnumerationPrinter(PrettyPrinter):
"""A pretty-printer which can be used to print a flag-style enumeration.
A flag-style enumeration is one where the enumerators are or'd
together to create values. The new printer will print these
symbolically using '|' notation. The printer must be registered
manually. This printer is most useful when an enum is flag-like,
but has some overlap. GDB's built-in printing will not handle
this case, but this printer will attempt to."""
def __init__(self, enum_type):
super(FlagEnumerationPrinter, self).__init__(enum_type)
self.initialized = False
def __call__(self, val):
if not self.initialized:
self.initialized = True
flags = gdb.lookup_type(self.name)
self.enumerators = []
for field in flags.fields():
self.enumerators.append((field.name, field.enumval))
# Sorting the enumerators by value usually does the right
# thing.
            self.enumerators.sort(key = lambda x: x[1])
if self.enabled:
return _EnumInstance(self.enumerators, val)
else:
return None
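# Illustrative sketch, not part of the original module: FlagEnumerationPrinter
# resolves its enum type lazily on first use, so it can be registered before
# the program's symbols are loaded. "my_flags_t" is a hypothetical enum name.
def _example_register_flag_printer():
    register_pretty_printer(None, FlagEnumerationPrinter("my_flags_t"),
                            replace=True)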
| gpl-2.0 |
GeoCat/QGIS | python/plugins/processing/algs/qgis/DropMZValues.py | 1 | 3225 | # -*- coding: utf-8 -*-
"""
***************************************************************************
DropMZValues.py
--------------
Date : July 2017
Copyright : (C) 2017 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'July 2017'
__copyright__ = '(C) 2017, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.core import (QgsGeometry,
QgsWkbTypes,
QgsProcessingParameterBoolean)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class DropMZValues(QgisFeatureBasedAlgorithm):
DROP_M_VALUES = 'DROP_M_VALUES'
DROP_Z_VALUES = 'DROP_Z_VALUES'
def group(self):
return self.tr('Vector geometry')
def __init__(self):
super().__init__()
self.drop_m = False
self.drop_z = False
def name(self):
return 'dropmzvalues'
def displayName(self):
return self.tr('Drop M/Z values')
def outputName(self):
return self.tr('Z/M Dropped')
def tags(self):
return self.tr('drop,set,convert,m,measure,z,25d,3d,values').split(',')
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterBoolean(self.DROP_M_VALUES,
self.tr('Drop M Values'), defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.DROP_Z_VALUES,
self.tr('Drop Z Values'), defaultValue=False))
def outputWkbType(self, inputWkb):
wkb = inputWkb
if self.drop_m:
wkb = QgsWkbTypes.dropM(wkb)
if self.drop_z:
wkb = QgsWkbTypes.dropZ(wkb)
return wkb
def prepareAlgorithm(self, parameters, context, feedback):
self.drop_m = self.parameterAsBool(parameters, self.DROP_M_VALUES, context)
self.drop_z = self.parameterAsBool(parameters, self.DROP_Z_VALUES, context)
return True
def processFeature(self, feature, feedback):
input_geometry = feature.geometry()
if input_geometry:
new_geom = input_geometry.geometry().clone()
if self.drop_m:
new_geom.dropMValue()
if self.drop_z:
new_geom.dropZValue()
feature.setGeometry(QgsGeometry(new_geom))
return feature
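# Illustrative sketch, not part of the original algorithm: how the reported
# output WKB type is derived when both drop options are enabled. The input
# type used here (QgsWkbTypes.PointZM) is just an assumed example value.
def _example_output_wkb_type():
    alg = DropMZValues()
    alg.drop_m = True
    alg.drop_z = True
    # QgsWkbTypes.dropM() then dropZ() strip the extra dimensions, so a
    # PointZM input would be reported as a plain Point output layer type.
    return alg.outputWkbType(QgsWkbTypes.PointZM)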
| gpl-2.0 |
archen/django | django/contrib/gis/db/models/query.py | 6 | 36358 | from django.db import connections
from django.db.models.query import QuerySet, ValuesQuerySet, ValuesListQuerySet
from django.contrib.gis import memoryview
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.db.models.fields import get_srid_info, PointField, LineStringField
from django.contrib.gis.db.models.sql import AreaField, DistanceField, GeomField, GeoQuery
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
from django.utils import six
class GeoQuerySet(QuerySet):
"The Geographic QuerySet."
### Methods overloaded from QuerySet ###
def __init__(self, model=None, query=None, using=None, hints=None):
super(GeoQuerySet, self).__init__(model=model, query=query, using=using, hints=hints)
self.query = query or GeoQuery(self.model)
def values(self, *fields):
return self._clone(klass=GeoValuesQuerySet, setup=True, _fields=fields)
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
return self._clone(klass=GeoValuesListQuerySet, setup=True, flat=flat,
_fields=fields)
### GeoQuerySet Methods ###
def area(self, tolerance=0.05, **kwargs):
"""
Returns the area of the geographic field in an `area` attribute on
each element of this GeoQuerySet.
"""
        # Performing setup here rather than in `_spatial_attribute` so that
# we can get the units for `AreaField`.
procedure_args, geo_field = self._spatial_setup('area', field_name=kwargs.get('field_name', None))
s = {'procedure_args': procedure_args,
'geo_field': geo_field,
'setup': False,
}
connection = connections[self.db]
backend = connection.ops
if backend.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
elif backend.postgis or backend.spatialite:
if backend.geography:
# Geography fields support area calculation, returns square meters.
s['select_field'] = AreaField('sq_m')
elif not geo_field.geodetic(connection):
# Getting the area units of the geographic field.
s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection)))
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise Exception('Area on geodetic coordinate systems not supported.')
return self._spatial_attribute('area', s, **kwargs)
def centroid(self, **kwargs):
"""
Returns the centroid of the geographic field in a `centroid`
attribute on each element of this GeoQuerySet.
"""
return self._geom_attribute('centroid', **kwargs)
def collect(self, **kwargs):
"""
Performs an aggregate collect operation on the given geometry field.
        This is analogous to a union operation, but much faster because
boundaries are not dissolved.
"""
return self._spatial_aggregate(aggregates.Collect, **kwargs)
def difference(self, geom, **kwargs):
"""
Returns the spatial difference of the geographic field in a `difference`
attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('difference', geom, **kwargs)
def distance(self, geom, **kwargs):
"""
Returns the distance from the given geographic field name to the
given geometry in a `distance` attribute on each element of the
GeoQuerySet.
Keyword Arguments:
`spheroid` => If the geometry field is geodetic and PostGIS is
the spatial database, then the more accurate
spheroid calculation will be used instead of the
quicker sphere calculation.
`tolerance` => Used only for Oracle. The tolerance is
in meters -- a default of 5 centimeters (0.05)
is used.
"""
return self._distance_attribute('distance', geom, **kwargs)
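    # Illustrative usage sketch (not from the original docs); "City" is a
    # hypothetical model with a PointField, assuming GEOS is available:
    #   from django.contrib.gis.geos import Point
    #   pnt = Point(-96.876369, 29.905320, srid=4326)
    #   for city in City.objects.distance(pnt, spheroid=True):
    #       print(city.distance)   # a Distance object, e.g. city.distance.km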
def envelope(self, **kwargs):
"""
Returns a Geometry representing the bounding box of the
Geometry field in an `envelope` attribute on each element of
the GeoQuerySet.
"""
return self._geom_attribute('envelope', **kwargs)
def extent(self, **kwargs):
"""
Returns the extent (aggregate) of the features in the GeoQuerySet. The
extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).
"""
return self._spatial_aggregate(aggregates.Extent, **kwargs)
def extent3d(self, **kwargs):
"""
Returns the aggregate extent, in 3D, of the features in the
GeoQuerySet. It is returned as a 6-tuple, comprising:
(xmin, ymin, zmin, xmax, ymax, zmax).
"""
return self._spatial_aggregate(aggregates.Extent3D, **kwargs)
def force_rhr(self, **kwargs):
"""
Returns a modified version of the Polygon/MultiPolygon in which
all of the vertices follow the Right-Hand-Rule. By default,
this is attached as the `force_rhr` attribute on each element
of the GeoQuerySet.
"""
return self._geom_attribute('force_rhr', **kwargs)
def geojson(self, precision=8, crs=False, bbox=False, **kwargs):
"""
        Returns a GeoJSON representation of the geometry field in a `geojson`
attribute on each element of the GeoQuerySet.
The `crs` and `bbox` keywords may be set to True if the user wants
the coordinate reference system and the bounding box to be included
in the GeoJSON representation of the geometry.
"""
backend = connections[self.db].ops
if not backend.geojson:
raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ '
'support GeoJSON serialization.')
if not isinstance(precision, six.integer_types):
raise TypeError('Precision keyword must be set with an integer.')
# Setting the options flag -- which depends on which version of
# PostGIS we're using. SpatiaLite only uses the first group of options.
if backend.spatial_version >= (1, 4, 0):
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
else:
options = 0
if crs and bbox:
options = 3
elif crs:
options = 1
elif bbox:
options = 2
s = {'desc': 'GeoJSON',
'procedure_args': {'precision': precision, 'options': options},
'procedure_fmt': '%(geo_col)s,%(precision)s,%(options)s',
}
return self._spatial_attribute('geojson', s, **kwargs)
def geohash(self, precision=20, **kwargs):
"""
Returns a GeoHash representation of the given field in a `geohash`
attribute on each element of the GeoQuerySet.
        The `precision` keyword may be used to customize the number of
_characters_ used in the output GeoHash, the default is 20.
"""
s = {'desc': 'GeoHash',
'procedure_args': {'precision': precision},
'procedure_fmt': '%(geo_col)s,%(precision)s',
}
return self._spatial_attribute('geohash', s, **kwargs)
def gml(self, precision=8, version=2, **kwargs):
"""
Returns GML representation of the given field in a `gml` attribute
on each element of the GeoQuerySet.
"""
backend = connections[self.db].ops
s = {'desc': 'GML', 'procedure_args': {'precision': precision}}
if backend.postgis:
# PostGIS AsGML() aggregate function parameter order depends on the
# version -- uggh.
if backend.spatial_version > (1, 3, 1):
s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s'
else:
s['procedure_fmt'] = '%(geo_col)s,%(precision)s,%(version)s'
s['procedure_args'] = {'precision': precision, 'version': version}
return self._spatial_attribute('gml', s, **kwargs)
def intersection(self, geom, **kwargs):
"""
Returns the spatial intersection of the Geometry field in
an `intersection` attribute on each element of this
GeoQuerySet.
"""
return self._geomset_attribute('intersection', geom, **kwargs)
def kml(self, **kwargs):
"""
Returns KML representation of the geometry field in a `kml`
attribute on each element of this GeoQuerySet.
"""
s = {'desc': 'KML',
'procedure_fmt': '%(geo_col)s,%(precision)s',
'procedure_args': {'precision': kwargs.pop('precision', 8)},
}
return self._spatial_attribute('kml', s, **kwargs)
def length(self, **kwargs):
"""
Returns the length of the geometry field as a `Distance` object
stored in a `length` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('length', None, **kwargs)
def make_line(self, **kwargs):
"""
Creates a linestring from all of the PointField geometries in the
this GeoQuerySet and returns it. This is a spatial aggregate
method, and thus returns a geometry rather than a GeoQuerySet.
"""
return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs)
def mem_size(self, **kwargs):
"""
Returns the memory size (number of bytes) that the geometry field takes
in a `mem_size` attribute on each element of this GeoQuerySet.
"""
return self._spatial_attribute('mem_size', {}, **kwargs)
def num_geom(self, **kwargs):
"""
Returns the number of geometries if the field is a
GeometryCollection or Multi* Field in a `num_geom`
attribute on each element of this GeoQuerySet; otherwise
        sets with None.
"""
return self._spatial_attribute('num_geom', {}, **kwargs)
def num_points(self, **kwargs):
"""
Returns the number of points in the first linestring in the
Geometry field in a `num_points` attribute on each element of
this GeoQuerySet; otherwise sets with None.
"""
return self._spatial_attribute('num_points', {}, **kwargs)
def perimeter(self, **kwargs):
"""
Returns the perimeter of the geometry field as a `Distance` object
stored in a `perimeter` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('perimeter', None, **kwargs)
def point_on_surface(self, **kwargs):
"""
Returns a Point geometry guaranteed to lie on the surface of the
Geometry field in a `point_on_surface` attribute on each element
of this GeoQuerySet; otherwise sets with None.
"""
return self._geom_attribute('point_on_surface', **kwargs)
def reverse_geom(self, **kwargs):
"""
Reverses the coordinate order of the geometry, and attaches as a
`reverse` attribute on each element of this GeoQuerySet.
"""
s = {'select_field': GeomField()}
kwargs.setdefault('model_att', 'reverse_geom')
if connections[self.db].ops.oracle:
s['geo_field_type'] = LineStringField
return self._spatial_attribute('reverse', s, **kwargs)
def scale(self, x, y, z=0.0, **kwargs):
"""
Scales the geometry to a new size by multiplying the ordinates
with the given x,y,z scale factors.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D scaling.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('scale', s, **kwargs)
def snap_to_grid(self, *args, **kwargs):
"""
Snap all points of the input geometry to the grid. How the
geometry is snapped to the grid depends on how many arguments
were given:
- 1 argument : A single size to snap both the X and Y grids to.
- 2 arguments: X and Y sizes to snap the grid to.
- 4 arguments: X, Y sizes and the X, Y origins.
"""
if False in [isinstance(arg, (float,) + six.integer_types) for arg in args]:
raise TypeError('Size argument(s) for the grid must be a float or integer values.')
nargs = len(args)
if nargs == 1:
size = args[0]
procedure_fmt = '%(geo_col)s,%(size)s'
procedure_args = {'size': size}
elif nargs == 2:
xsize, ysize = args
procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize}
elif nargs == 4:
xsize, ysize, xorigin, yorigin = args
procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize,
'xorigin': xorigin, 'yorigin': yorigin}
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.')
s = {'procedure_fmt': procedure_fmt,
'procedure_args': procedure_args,
'select_field': GeomField(),
}
return self._spatial_attribute('snap_to_grid', s, **kwargs)
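    # Illustrative usage sketch (hypothetical "Zipcode" model), matching the
    # three calling forms described above:
    #   Zipcode.objects.snap_to_grid(0.001)                    # single size
    #   Zipcode.objects.snap_to_grid(0.001, 0.002)             # x, y sizes
    #   Zipcode.objects.snap_to_grid(0.001, 0.002, 0.5, 0.5)   # sizes + origins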
def svg(self, relative=False, precision=8, **kwargs):
"""
Returns SVG representation of the geographic field in a `svg`
attribute on each element of this GeoQuerySet.
Keyword Arguments:
`relative` => If set to True, this will evaluate the path in
terms of relative moves (rather than absolute).
`precision` => May be used to set the maximum number of decimal
digits used in output (defaults to 8).
"""
relative = int(bool(relative))
if not isinstance(precision, six.integer_types):
raise TypeError('SVG precision keyword argument must be an integer.')
s = {
'desc': 'SVG',
'procedure_fmt': '%(geo_col)s,%(rel)s,%(precision)s',
'procedure_args': {
'rel': relative,
'precision': precision,
}
}
return self._spatial_attribute('svg', s, **kwargs)
def sym_difference(self, geom, **kwargs):
"""
Returns the symmetric difference of the geographic field in a
`sym_difference` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('sym_difference', geom, **kwargs)
def translate(self, x, y, z=0.0, **kwargs):
"""
Translates the geometry to a new location using the given numeric
parameters as offsets.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D translation.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('translate', s, **kwargs)
def transform(self, srid=4326, **kwargs):
"""
Transforms the given geometry field to the given SRID. If no SRID is
provided, the transformation will default to using 4326 (WGS84).
"""
if not isinstance(srid, six.integer_types):
raise TypeError('An integer SRID must be provided.')
field_name = kwargs.get('field_name', None)
tmp, geo_field = self._spatial_setup('transform', field_name=field_name)
# Getting the selection SQL for the given geographic field.
field_col = self._geocol_select(geo_field, field_name)
# Why cascading substitutions? Because spatial backends like
# Oracle and MySQL already require a function call to convert to text, thus
# when there's also a transformation we need to cascade the substitutions.
# For example, 'SDO_UTIL.TO_WKTGEOMETRY(SDO_CS.TRANSFORM( ... )'
geo_col = self.query.custom_select.get(geo_field, field_col)
# Setting the key for the field's column with the custom SELECT SQL to
# override the geometry column returned from the database.
custom_sel = '%s(%s, %s)' % (connections[self.db].ops.transform, geo_col, srid)
# TODO: Should we have this as an alias?
# custom_sel = '(%s(%s, %s)) AS %s' % (SpatialBackend.transform, geo_col, srid, qn(geo_field.name))
self.query.transformed_srid = srid # So other GeoQuerySet methods
self.query.custom_select[geo_field] = custom_sel
return self._clone()
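    # Illustrative usage sketch (the ``City`` model, its ``point`` field and
    # the SRID are assumptions):
    #
    #   qs = City.objects.transform(900913)   # reproject the geometry column
    #   qs[0].point.srid                      # -> 900913
    #
    # With no argument, ``transform()`` defaults to SRID 4326 (WGS84).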
def union(self, geom, **kwargs):
"""
Returns the union of the geographic field with the given
Geometry in a `union` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('union', geom, **kwargs)
def unionagg(self, **kwargs):
"""
Performs an aggregate union on the given geometry field. Returns
None if the GeoQuerySet is empty. The `tolerance` keyword is for
Oracle backends only.
"""
return self._spatial_aggregate(aggregates.Union, **kwargs)
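    # Illustrative usage sketch (the ``Zipcode`` model and filter are
    # assumptions): ``Zipcode.objects.filter(code__startswith='77').unionagg()``
    # returns one geometry that is the union of all matching values, or None
    # for an empty queryset; ``tolerance`` is only honoured on Oracle.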
### Private API -- Abstracted DRY routines. ###
def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None):
"""
Performs set up for executing the spatial function.
"""
# Does the spatial backend support this?
connection = connections[self.db]
func = getattr(connection.ops, att, False)
if desc is None:
desc = att
if not func:
raise NotImplementedError('%s stored procedure not available on '
'the %s backend.' %
(desc, connection.ops.name))
# Initializing the procedure arguments.
procedure_args = {'function': func}
# Is there a geographic field in the model to perform this
# operation on?
geo_field = self.query._geo_field(field_name)
if not geo_field:
raise TypeError('%s output only available on GeometryFields.' % func)
# If the `geo_field_type` keyword was used, then enforce that
# type limitation.
if not geo_field_type is None and not isinstance(geo_field, geo_field_type):
raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))
# Setting the procedure args.
procedure_args['geo_col'] = self._geocol_select(geo_field, field_name)
return procedure_args, geo_field
def _spatial_aggregate(self, aggregate, field_name=None,
geo_field_type=None, tolerance=0.05):
"""
DRY routine for calling aggregate spatial stored procedures and
returning their result to the caller of the function.
"""
# Getting the field the geographic aggregate will be called on.
geo_field = self.query._geo_field(field_name)
if not geo_field:
raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name)
# Checking if there are any geo field type limitations on this
# aggregate (e.g. ST_Makeline only operates on PointFields).
if not geo_field_type is None and not isinstance(geo_field, geo_field_type):
raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__))
# Getting the string expression of the field name, as this is the
# argument taken by `Aggregate` objects.
agg_col = field_name or geo_field.name
# Adding any keyword parameters for the Aggregate object. Oracle backends
# in particular need an additional `tolerance` parameter.
agg_kwargs = {}
if connections[self.db].ops.oracle:
agg_kwargs['tolerance'] = tolerance
# Calling the QuerySet.aggregate, and returning only the value of the aggregate.
return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg']
def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
"""
DRY routine for calling a spatial stored procedure on a geometry column
and attaching its output as an attribute of the model.
Arguments:
att:
The name of the spatial attribute that holds the spatial
SQL function to call.
settings:
          Dictionary of internal settings to customize for the spatial procedure.
Public Keyword Arguments:
field_name:
The name of the geographic field to call the spatial
function on. May also be a lookup to a geometry field
as part of a foreign key relation.
model_att:
The name of the model attribute to attach the output of
the spatial function to.
"""
# Default settings.
settings.setdefault('desc', None)
settings.setdefault('geom_args', ())
settings.setdefault('geom_field', None)
settings.setdefault('procedure_args', {})
settings.setdefault('procedure_fmt', '%(geo_col)s')
settings.setdefault('select_params', [])
connection = connections[self.db]
backend = connection.ops
# Performing setup for the spatial column, unless told not to.
if settings.get('setup', True):
default_args, geo_field = self._spatial_setup(att, desc=settings['desc'], field_name=field_name,
geo_field_type=settings.get('geo_field_type', None))
for k, v in six.iteritems(default_args):
settings['procedure_args'].setdefault(k, v)
else:
geo_field = settings['geo_field']
# The attribute to attach to the model.
if not isinstance(model_att, six.string_types):
model_att = att
# Special handling for any argument that is a geometry.
for name in settings['geom_args']:
# Using the field's get_placeholder() routine to get any needed
# transformation SQL.
geom = geo_field.get_prep_value(settings['procedure_args'][name])
params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
geom_placeholder = geo_field.get_placeholder(geom, connection)
# Replacing the procedure format with that of any needed
# transformation SQL.
old_fmt = '%%(%s)s' % name
new_fmt = geom_placeholder % '%%s'
settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
settings['select_params'].extend(params)
# Getting the format for the stored procedure.
fmt = '%%(function)s(%s)' % settings['procedure_fmt']
# If the result of this function needs to be converted.
if settings.get('select_field', False):
sel_fld = settings['select_field']
if isinstance(sel_fld, GeomField) and backend.select:
self.query.custom_select[model_att] = backend.select
if connection.ops.oracle:
sel_fld.empty_strings_allowed = False
self.query.extra_select_fields[model_att] = sel_fld
# Finally, setting the extra selection attribute with
# the format string expanded with the stored procedure
# arguments.
return self.extra(select={model_att: fmt % settings['procedure_args']},
select_params=settings['select_params'])
def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
"""
DRY routine for GeoQuerySet distance attribute routines.
"""
# Setting up the distance procedure arguments.
procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name', None))
        # If the field is geodetic, default the distance attribute to meters
        # (Oracle and PostGIS spherical distances return meters). Otherwise,
        # use the units of the geometry field.
connection = connections[self.db]
geodetic = geo_field.geodetic(connection)
geography = geo_field.geography
if geodetic:
dist_att = 'm'
else:
dist_att = Distance.unit_attname(geo_field.units_name(connection))
# Shortcut booleans for what distance function we're using and
# whether the geometry field is 3D.
distance = func == 'distance'
length = func == 'length'
perimeter = func == 'perimeter'
if not (distance or length or perimeter):
raise ValueError('Unknown distance function: %s' % func)
geom_3d = geo_field.dim == 3
# The field's get_db_prep_lookup() is used to get any
# extra distance parameters. Here we set up the
# parameters that will be passed in to field's function.
lookup_params = [geom or 'POINT (0 0)', 0]
# Getting the spatial backend operations.
backend = connection.ops
# If the spheroid calculation is desired, either by the `spheroid`
# keyword or when calculating the length of geodetic field, make
# sure the 'spheroid' distance setting string is passed in so we
# get the correct spatial stored procedure.
if spheroid or (backend.postgis and geodetic and
(not geography) and length):
lookup_params.append('spheroid')
lookup_params = geo_field.get_prep_value(lookup_params)
params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection)
# The `geom_args` flag is set to true if a geometry parameter was
# passed in.
geom_args = bool(geom)
if backend.oracle:
if distance:
procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
elif length or perimeter:
procedure_fmt = '%(geo_col)s,%(tolerance)s'
procedure_args['tolerance'] = tolerance
else:
# Getting whether this field is in units of degrees since the field may have
# been transformed via the `transform` GeoQuerySet method.
if self.query.transformed_srid:
u, unit_name, s = get_srid_info(self.query.transformed_srid, connection)
geodetic = unit_name.lower() in geo_field.geodetic_units
if backend.spatialite and geodetic:
raise ValueError('SQLite does not support linear distance calculations on geodetic coordinate systems.')
if distance:
if self.query.transformed_srid:
# Setting the `geom_args` flag to false because we want to handle
# transformation SQL here, rather than the way done by default
# (which will transform to the original SRID of the field rather
# than to what was transformed to).
geom_args = False
procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, self.query.transformed_srid)
if geom.srid is None or geom.srid == self.query.transformed_srid:
# If the geom parameter srid is None, it is assumed the coordinates
# are in the transformed units. A placeholder is used for the
# geometry parameter. `GeomFromText` constructor is also needed
# to wrap geom placeholder for SpatiaLite.
if backend.spatialite:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, self.query.transformed_srid)
else:
procedure_fmt += ', %%s'
else:
# We need to transform the geom to the srid specified in `transform()`,
# so wrapping the geometry placeholder in transformation SQL.
# SpatiaLite also needs geometry placeholder wrapped in `GeomFromText`
# constructor.
if backend.spatialite:
procedure_fmt += ', %s(%s(%%%%s, %s), %s)' % (backend.transform, backend.from_text,
geom.srid, self.query.transformed_srid)
else:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, self.query.transformed_srid)
else:
# `transform()` was not used on this GeoQuerySet.
procedure_fmt = '%(geo_col)s,%(geom)s'
if not geography and geodetic:
# Spherical distance calculation is needed (because the geographic
# field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
                    # procedures may only do queries from point columns to point geometries,
                    # so some error checking is required.
if not backend.geography:
if not isinstance(geo_field, PointField):
raise ValueError('Spherical distance calculation only supported on PointFields.')
if not str(Geometry(memoryview(params[0].ewkb)).geom_type) == 'Point':
raise ValueError('Spherical distance calculation only supported with Point Geometry parameters')
# The `function` procedure argument needs to be set differently for
# geodetic distance calculations.
if spheroid:
# Call to distance_spheroid() requires spheroid param as well.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.distance_spheroid, 'spheroid': params[1]})
else:
procedure_args.update({'function': backend.distance_sphere})
elif length or perimeter:
procedure_fmt = '%(geo_col)s'
if not geography and geodetic and length:
# There's no `length_sphere`, and `length_spheroid` also
# works on 3D geometries.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.length_spheroid, 'spheroid': params[1]})
elif geom_3d and backend.postgis:
# Use 3D variants of perimeter and length routines on PostGIS.
if perimeter:
procedure_args.update({'function': backend.perimeter3d})
elif length:
procedure_args.update({'function': backend.length3d})
# Setting up the settings for `_spatial_attribute`.
s = {'select_field': DistanceField(dist_att),
'setup': False,
'geo_field': geo_field,
'procedure_args': procedure_args,
'procedure_fmt': procedure_fmt,
}
if geom_args:
s['geom_args'] = ('geom',)
s['procedure_args']['geom'] = geom
elif geom:
# The geometry is passed in as a parameter because we handled
# transformation conditions in this routine.
s['select_params'] = [backend.Adapter(geom)]
return self._spatial_attribute(func, s, **kwargs)
def _geom_attribute(self, func, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute (e.g., `centroid`, `point_on_surface`).
"""
s = {'select_field': GeomField()}
if connections[self.db].ops.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args'] = {'tolerance': tolerance}
return self._spatial_attribute(func, s, **kwargs)
def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
        Geometry attribute and takes a Geometry parameter. This is used
for geometry set-like operations (e.g., intersection, difference,
union, sym_difference).
"""
s = {
'geom_args': ('geom',),
'select_field': GeomField(),
'procedure_fmt': '%(geo_col)s,%(geom)s',
'procedure_args': {'geom': geom},
}
if connections[self.db].ops.oracle:
s['procedure_fmt'] += ',%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
return self._spatial_attribute(func, s, **kwargs)
def _geocol_select(self, geo_field, field_name):
"""
Helper routine for constructing the SQL to select the geographic
column. Takes into account if the geographic field is in a
ForeignKey relation to the current model.
"""
opts = self.model._meta
if not geo_field in opts.fields:
# Is this operation going to be on a related geographic field?
# If so, it'll have to be added to the select related information
# (e.g., if 'location__point' was given as the field name).
self.query.add_select_related([field_name])
compiler = self.query.get_compiler(self.db)
compiler.pre_sql_setup()
for (rel_table, rel_col), field in self.query.related_select_cols:
if field == geo_field:
return compiler._field_column(geo_field, rel_table)
raise ValueError("%r not in self.query.related_select_cols" % geo_field)
elif not geo_field in opts.local_fields:
# This geographic field is inherited from another model, so we have to
# use the db table for the _parent_ model instead.
tmp_fld, parent_model, direct, m2m = opts.get_field_by_name(geo_field.name)
return self.query.get_compiler(self.db)._field_column(geo_field, parent_model._meta.db_table)
else:
return self.query.get_compiler(self.db)._field_column(geo_field)
class GeoValuesQuerySet(ValuesQuerySet):
def __init__(self, *args, **kwargs):
super(GeoValuesQuerySet, self).__init__(*args, **kwargs)
# This flag tells `resolve_columns` to run the values through
# `convert_values`. This ensures that Geometry objects instead
# of string values are returned with `values()` or `values_list()`.
self.query.geo_values = True
class GeoValuesListQuerySet(GeoValuesQuerySet, ValuesListQuerySet):
pass
| bsd-3-clause |
hurricup/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/db/models/sql/aggregates.py | 309 | 1804 | from django.db.models.sql.aggregates import *
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql.conversion import GeomField
class GeoAggregate(Aggregate):
# Default SQL template for spatial aggregates.
sql_template = '%(function)s(%(field)s)'
# Conversion class, if necessary.
conversion_class = None
# Flags for indicating the type of the aggregate.
is_extent = False
def __init__(self, col, source=None, is_summary=False, tolerance=0.05, **extra):
super(GeoAggregate, self).__init__(col, source, is_summary, **extra)
# Required by some Oracle aggregates.
self.tolerance = tolerance
# Can't use geographic aggregates on non-geometry fields.
if not isinstance(self.source, GeometryField):
raise ValueError('Geospatial aggregates only allowed on geometry fields.')
def as_sql(self, qn, connection):
"Return the aggregate, rendered as SQL."
if connection.ops.oracle:
self.extra['tolerance'] = self.tolerance
if hasattr(self.col, 'as_sql'):
field_name = self.col.as_sql(qn, connection)
elif isinstance(self.col, (list, tuple)):
field_name = '.'.join([qn(c) for c in self.col])
else:
field_name = self.col
sql_template, sql_function = connection.ops.spatial_aggregate_sql(self)
params = {
'function': sql_function,
'field': field_name
}
params.update(self.extra)
return sql_template % params
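    # Sketch of how the default template above expands; the function and
    # column names here are assumptions:
    #
    #   '%(function)s(%(field)s)' % {'function': 'ST_Extent',
    #                                'field': '"city"."point"'}
    #   # -> 'ST_Extent("city"."point")'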
class Collect(GeoAggregate):
pass
class Extent(GeoAggregate):
is_extent = '2D'
class Extent3D(GeoAggregate):
is_extent = '3D'
class MakeLine(GeoAggregate):
pass
class Union(GeoAggregate):
pass
| apache-2.0 |
Mauricio3000/maya_testing | lib/joints.py | 1 | 1512 | import pymel.core as pm
import errors
'''
Test methods for joints in Maya
'''
def assertAimAxis(jnt=None, aim=None):
'''Assert the given axis is the axis aiming at child nodes
Attributes:
jnt -- Joint in scene to check
aim -- Character "x"|"y"|"z". Axis expected to aim at child
'''
if not isinstance(jnt, pm.nt.Joint):
raise errors.InputError('jnt', jnt, pm.nt.Joint)
if aim.lower() not in ['x', 'y', 'z']:
raise errors.InputError('aim', aim, '"x", "y" or "z"')
aim = aim.lower()
children = jnt.getChildren()
if not children:
raise errors.ObjectError(jnt, 'Child nodes', children)
if len(children) > 1:
raise errors.ObjectError(jnt, 'One child', children)
# Place locator as child to jnt and zero it
loc = pm.spaceLocator()
pm.parent(loc, jnt)
loc.setTranslation(0)
loc.setRotation([0, 0, 0])
# Get magnitude of vector to child
jnt_pos = pm.dt.Vector(pm.xform(jnt, q=1, ws=1, t=1))
chld_pos = pm.dt.Vector(pm.xform(children[0], q=1, ws=1, t=1))
vec = chld_pos - jnt_pos
# Move it along expected axis
if aim == 'x':
loc.tx.set(vec.length())
if aim == 'y':
loc.ty.set(vec.length())
if aim == 'z':
loc.tz.set(vec.length())
loc_pos = pm.xform(loc, q=1, ws=1, t=1)
# Remove locator from the scene
pm.delete(loc)
for l, p in zip(loc_pos, chld_pos):
if round(l, 6) != round(p, 6):
return False
return True
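# Illustrative usage sketch (the joint name and import path are assumptions;
# requires a Maya scene containing a two-joint chain):
#
#   import pymel.core as pm
#   import joints
#   jnt = pm.PyNode('joint1')                 # parent joint with one child
#   joints.assertAimAxis(jnt=jnt, aim='x')    # True if +X aims at the child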
| gpl-3.0 |
caphrim007/ansible-modules-extras | cloud/google/gce_tag.py | 53 | 6397 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
DOCUMENTATION = '''
---
module: gce_tag
version_added: "2.0"
short_description: add or remove tag(s) to/from GCE instance
description:
- This module can add or remove tags U(https://cloud.google.com/compute/docs/instances/#tags)
to/from GCE instance.
options:
instance_name:
description:
- the name of the GCE instance to add/remove tags
required: true
default: null
aliases: []
tags:
description:
- comma-separated list of tags to add or remove
required: true
default: null
aliases: []
state:
description:
- desired state of the tags
required: false
default: "present"
choices: ["present", "absent"]
aliases: []
zone:
description:
- the zone of the disk specified by source
required: false
default: "us-central1-a"
aliases: []
service_account_email:
description:
- service account email
required: false
default: null
aliases: []
pem_file:
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud"
author: "Do Hoang Khiem ([email protected])"
'''
EXAMPLES = '''
# Add tags 'http-server', 'https-server', 'staging' to instance name 'staging-server' in zone us-central1-a.
- gce_tag:
instance_name: staging-server
tags: http-server,https-server,staging
zone: us-central1-a
state: present
# Remove tags 'foo', 'bar' from instance 'test-server' in default zone (us-central1-a)
- gce_tag:
instance_name: test-server
tags: foo,bar
state: absent
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, InvalidRequestError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def add_tags(gce, module, instance_name, tags):
"""Add tags to instance."""
zone = module.params.get('zone')
if not instance_name:
module.fail_json(msg='Must supply instance_name', changed=False)
if not tags:
module.fail_json(msg='Must supply tags', changed=False)
tags = [x.lower() for x in tags]
try:
node = gce.ex_get_node(instance_name, zone=zone)
except ResourceNotFoundError:
module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
node_tags = node.extra['tags']
changed = False
tags_changed = []
for t in tags:
if t not in node_tags:
changed = True
node_tags.append(t)
tags_changed.append(t)
if not changed:
return False, None
try:
gce.ex_set_node_tags(node, node_tags)
return True, tags_changed
except (GoogleBaseError, InvalidRequestError) as e:
module.fail_json(msg=str(e), changed=False)
def remove_tags(gce, module, instance_name, tags):
"""Remove tags from instance."""
zone = module.params.get('zone')
if not instance_name:
module.fail_json(msg='Must supply instance_name', changed=False)
if not tags:
module.fail_json(msg='Must supply tags', changed=False)
tags = [x.lower() for x in tags]
try:
node = gce.ex_get_node(instance_name, zone=zone)
except ResourceNotFoundError:
module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
node_tags = node.extra['tags']
changed = False
tags_changed = []
for t in tags:
if t in node_tags:
node_tags.remove(t)
changed = True
tags_changed.append(t)
if not changed:
return False, None
try:
gce.ex_set_node_tags(node, node_tags)
return True, tags_changed
except (GoogleBaseError, InvalidRequestError) as e:
module.fail_json(msg=str(e), changed=False)
def main():
module = AnsibleModule(
argument_spec=dict(
instance_name=dict(required=True),
tags=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
zone=dict(default='us-central1-a'),
service_account_email=dict(),
pem_file=dict(),
project_id=dict(),
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support is required.')
instance_name = module.params.get('instance_name')
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
changed = False
if not zone:
module.fail_json(msg='Must specify "zone"', changed=False)
if not tags:
module.fail_json(msg='Must specify "tags"', changed=False)
gce = gce_connect(module)
# add tags to instance.
if state == 'present':
changed, tags_changed = add_tags(gce, module, instance_name, tags)
# remove tags from instance
if state == 'absent':
changed, tags_changed = remove_tags(gce, module, instance_name, tags)
module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| gpl-3.0 |
axiom-data-science/pyaxiom | pyaxiom/tests/dsg/profile/test_profile_im.py | 1 | 1768 | # -*- coding: utf-8 -*-
import os
import unittest
from dateutil.parser import parse as dtparse
import numpy as np
from pyaxiom.netcdf.sensors.dsg import IncompleteMultidimensionalProfile
import logging
from pyaxiom import logger
logger.level = logging.DEBUG
logger.handlers = [logging.StreamHandler()]
class TestIncompleteMultidimensionalProfile(unittest.TestCase):
def setUp(self):
self.multi = os.path.join(os.path.dirname(__file__), 'resources', 'im-multiple.nc')
def test_imp_load(self):
IncompleteMultidimensionalProfile(self.multi).close()
def test_imp_dataframe(self):
with IncompleteMultidimensionalProfile(self.multi) as ncd:
ncd.to_dataframe()
def test_imp_calculated_metadata(self):
with IncompleteMultidimensionalProfile(self.multi) as ncd:
m = ncd.calculated_metadata()
assert m.min_t == dtparse('1990-01-01 00:00:00')
assert m.max_t == dtparse('1990-01-06 21:00:00')
assert len(m.profiles.keys()) == 137
assert np.isclose(m.profiles[0].min_z, 0.05376)
assert np.isclose(m.profiles[0].max_z, 9.62958)
assert m.profiles[0].t == dtparse('1990-01-01 00:00:00')
assert m.profiles[0].x == 119
assert m.profiles[0].y == 171
assert np.isclose(m.profiles[141].min_z, 0.04196)
assert np.isclose(m.profiles[141].max_z, 9.85909)
assert m.profiles[141].t == dtparse('1990-01-06 21:00:00')
assert m.profiles[141].x == 34
assert m.profiles[141].y == 80
for n, v in ncd.variables.items():
assert np.issubdtype(v.dtype, np.int64) is False
assert np.issubdtype(v.dtype, np.uint64) is False
| mit |
cmunk/protwis | alignment/functions.py | 1 | 9498 | """
A set of utility functions for alignment processing.
"""
import re
from collections import OrderedDict
from common import definitions
from protein.models import Protein
def strip_html_tags(text):
"""
Remove the html tags from a string.
@param: text - string to clean up
"""
return re.sub('<.*?>', '', text)
def get_format_props(freq=None, freq_gs=None, res=None, feat=None):
"""
Get the excel cell format for residues/frequencies.
@param: freq - get the format for feature/aa frequency formatting
@param: res - get the format for residue colouring (alignment)
"""
residue = {
'A': {
'bg_color': '#E6E600',
'font_color': '#000000',
},
'C': {
'bg_color': '#B2B548',
'font_color': '#000000',
},
'D': {
'bg_color': '#E60A0A',
'font_color': '#FDFF7B',
},
'E': {
'bg_color': '#E60A0A',
'font_color': '#FDFF7B',
},
'F': {
'bg_color': '#18FF0B',
'font_color': '#000000',
},
'G': {
'bg_color': '#FF00F2',
'font_color': '#000000',
},
'H': {
'bg_color': '#0093DD',
'font_color': '#000000',
},
'I': {
'bg_color': '#E6E600',
'font_color': '#000000',
},
'K': {
'bg_color': '#145AFF',
'font_color': '#FDFF7B',
},
'L': {
'bg_color': '#E6E600',
'font_color': '#000000',
},
'M': {
'bg_color': '#E6E600',
'font_color': '#000000',
},
'N': {
'bg_color': '#A70CC6',
'font_color': '#FDFF7B',
},
'P': {
'bg_color': '#CC0099',
'font_color': '#FDFF7B',
},
'Q': {
'bg_color': '#A70CC6',
'font_color': '#FDFF7B',
},
'R': {
'bg_color': '#145AFF',
'font_color': '#FDFF7B',
},
'S': {
'bg_color': '#A70CC6',
'font_color': '#FDFF7B',
},
'T': {
'bg_color': '#A70CC6',
'font_color': '#FDFF7B',
},
'V': {
'bg_color': '#E6E600',
'font_color': '#000000',
},
'W': {
'bg_color': '#0BCF00',
'font_color': '#000000',
},
'Y': {
'bg_color': '#18FF0B',
'font_color': '#000000',
},
'-': {
'bg_color': '#FFFFFF',
'font_color': '#000000',
},
'_': {
'bg_color': '#EDEDED',
'font_color': '#000000',
},
'+': {
'bg_color': '#FFFFFF',
'font_color': '#000000',
}
}
properties = {
-1: {
'bg_color': '#FFFFFF',
},
0: {
'bg_color': '#ff0000',
},
1: {
'bg_color': '#ff3300',
},
2: {
'bg_color': '#ff6600',
},
3: {
'bg_color': '#ff9900',
},
4: {
'bg_color': '#ffcc00',
},
5: {
'bg_color': '#ffff00',
},
6: {
'bg_color': '#ccff00',
},
7: {
'bg_color': '#99ff00',
},
8: {
'bg_color': '#66ff00',
},
9: {
'bg_color': '#33ff00',
},
10: {
'bg_color': '#00ff00',
},
}
properties_gs = {
0: {
'bg_color': '#ffffff',
},
1: {
'bg_color': '#e0e0e0',
},
2: {
'bg_color': '#d0d0d0',
},
3: {
'bg_color': '#c0c0c0',
},
4: {
'bg_color': '#b0b0b0',
'font_color': '#ffffff',
},
5: {
'bg_color': '#a0a0a0',
'font_color': '#ffffff',
},
6: {
'bg_color': '#909090',
'font_color': '#ffffff',
},
7: {
'bg_color': '#808080',
'font_color': '#ffffff',
},
8: {
'bg_color': '#707070',
'font_color': '#ffffff',
},
9: {
'bg_color': '#606060',
'font_color': '#ffffff',
},
10: {
'bg_color': '#505050',
'font_color': '#ffffff',
},
}
property_group = {
'HY': {
'bg_color': '#93d050'
},
'HA': {
'bg_color': '#ffff00',
},
'M': {
'bg_color': '#ffff00',
},
'A': {
'bg_color': '#ffff00',
},
'I': {
'bg_color': '#ffff00',
},
'L': {
'bg_color': '#ffff00',
},
'V': {
'bg_color': '#ffff00',
},
'HR': {
'bg_color': '#07b050',
},
'W': {
'bg_color': '#07b050',
},
'Y': {
'bg_color': '#07b050',
},
'F': {
'bg_color': '#07b050',
},
'Hb': {
'bg_color': '#7030a0',
'font_color': '#ffffff',
},
'N': {
'bg_color': '#7030a0',
'font_color': '#ffffff',
},
'Q': {
'bg_color': '#7030a0',
'font_color': '#ffffff',
},
'S': {
'bg_color': '#7030a0',
'font_color': '#ffffff',
},
'T': {
'bg_color': '#7030a0',
'font_color': '#ffffff',
},
'Hu': {
'bg_color': '#7030a0',
'font_color': '#ffffff',
},
'Ha': {
'bg_color': '#7030a0',
'font_color': '#ff0000',
},
'Hd': {
'bg_color': '#7030a0',
# 'font_color': '#0070c0',
'font_color': '#02b0f0',
},
'+-': {
'bg_color': '#0070c0',
'font_color': '#ff0000',
},
'+': {
'bg_color': '#0070c0',
'font_color': '#000000',
},
'H': {
'bg_color': '#0070c0',
'font_color': '#000000',
},
'K': {
'bg_color': '#0070c0',
'font_color': '#000000',
},
'R': {
'bg_color': '#0070c0',
'font_color': '#000000',
},
'-': {
'bg_color': '#ff0000',
},
'D': {
'bg_color': '#ff0000',
},
'E': {
'bg_color': '#ff0000',
},
'Sm': {
'bg_color': '#ffffff',
},
'aH': {
'bg_color': '#d9d9d9',
},
'G': {
'bg_color': '#ff02ff',
},
'P': {
'bg_color': '#d603ff',
'font_color': '#ffffff',
},
'C': {
'bg_color': '#bf8f00',
},
}
if freq is not None:
try:
return properties[freq]
except KeyError:
return properties[int(freq)]
elif freq_gs is not None:
try:
return properties_gs[freq_gs]
except KeyError:
return properties_gs[int(freq_gs)]
elif res is not None:
return residue[res]
elif feat is not None:
print(feat)
try:
print(property_group[feat])
return property_group[feat]
except KeyError as msg:
return {'bg_color': '#ffffff'}
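# Illustrative usage sketch (the ``workbook`` object is an assumption): the
# dictionaries returned here are intended for XlsxWriter cell formats, e.g.
#
#   props = get_format_props(res='D')          # {'bg_color': '#E60A0A', ...}
#   cell_format = workbook.add_format(props)
#
# A non-integer frequency such as freq=7.4 falls back to the integer key 7.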
def get_proteins_from_selection(simple_selection):
proteins = []
# flatten the selection into individual proteins
for target in simple_selection.targets:
if target.type == 'protein':
proteins.append(target.item)
elif target.type == 'family':
# species filter
species_list = []
for species in simple_selection.species:
species_list.append(species.item)
# annotation filter
protein_source_list = []
for protein_source in simple_selection.annotation:
protein_source_list.append(protein_source.item)
if species_list:
family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
species__in=(species_list), source__in=(protein_source_list)).select_related(
'residue_numbering_scheme', 'species')
else:
family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')
for fp in family_proteins:
proteins.append(fp)
return proteins
def prepare_aa_group_preference():
pref_dict = {}
lengths = {}
for row, group in enumerate(definitions.AMINO_ACID_GROUPS.items()):
tmp_len = len(group[1])
try:
lengths[tmp_len].append(row)
except KeyError:
lengths[tmp_len] = [row,]
l_heap = sorted(lengths.keys())
while l_heap:
tmp = l_heap.pop()
for feat_row in lengths[tmp]:
pref_dict[feat_row] = []
for pref_feat in l_heap:
pref_dict[feat_row].extend(lengths[pref_feat])
return pref_dict | apache-2.0 |
Tokyo-Buffalo/tokyosouth | env/lib/python3.6/site-packages/twisted/plugins/twisted_reactors.py | 1 | 1939 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import absolute_import, division
from twisted.application.reactors import Reactor
from twisted.python.compat import _PY3
default = Reactor(
'default', 'twisted.internet.default',
'A reasonable default: poll(2) if available, otherwise select(2).')
select = Reactor(
'select', 'twisted.internet.selectreactor', 'select(2)-based reactor.')
poll = Reactor(
'poll', 'twisted.internet.pollreactor', 'poll(2)-based reactor.')
epoll = Reactor(
'epoll', 'twisted.internet.epollreactor', 'epoll(4)-based reactor.')
kqueue = Reactor(
'kqueue', 'twisted.internet.kqreactor', 'kqueue(2)-based reactor.')
cf = Reactor(
'cf' , 'twisted.internet.cfreactor',
'CoreFoundation integration reactor.')
__all__ = [
"default", "select", "poll", "epoll", "kqueue", "cf",
]
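# Illustrative usage sketch: these plugin entries expose the reactor names on
# the command line (e.g. ``twistd --reactor=epoll ...``); the equivalent
# programmatic installation, done before importing the reactor, is
#
#   from twisted.internet import epollreactor
#   epollreactor.install()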
if _PY3:
asyncio = Reactor(
'asyncio', 'twisted.internet.asyncioreactor',
'asyncio integration reactor')
__all__.extend([
"asyncio"
])
else:
wx = Reactor(
'wx', 'twisted.internet.wxreactor', 'wxPython integration reactor.')
gi = Reactor(
'gi', 'twisted.internet.gireactor',
'GObject Introspection integration reactor.')
gtk3 = Reactor(
'gtk3', 'twisted.internet.gtk3reactor', 'Gtk3 integration reactor.')
gtk2 = Reactor(
'gtk2', 'twisted.internet.gtk2reactor', 'Gtk2 integration reactor.')
glib2 = Reactor(
'glib2', 'twisted.internet.glib2reactor',
'GLib2 event-loop integration reactor.')
win32er = Reactor(
'win32', 'twisted.internet.win32eventreactor',
'Win32 WaitForMultipleObjects-based reactor.')
iocp = Reactor(
'iocp', 'twisted.internet.iocpreactor',
'Win32 IO Completion Ports-based reactor.')
__all__.extend([
"wx", "gi", "gtk2", "gtk3", "glib2", "glade", "win32er", "iocp"
])
| mit |
thefinn93/CouchPotatoServer | libs/werkzeug/security.py | 75 | 4705 | # -*- coding: utf-8 -*-
"""
werkzeug.security
~~~~~~~~~~~~~~~~~
Security related helpers such as secure password hashing tools.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import hmac
import posixpath
from itertools import izip
from random import SystemRandom
# because the API of hmac changed with the introduction of the
# new hashlib module, we have to support both. This sets up a
# mapping to the digest factory functions and the digest modules
# (or factory functions with changed API)
try:
from hashlib import sha1, md5
_hash_funcs = _hash_mods = {'sha1': sha1, 'md5': md5}
_sha1_mod = sha1
_md5_mod = md5
except ImportError:
import sha as _sha1_mod, md5 as _md5_mod
_hash_mods = {'sha1': _sha1_mod, 'md5': _md5_mod}
_hash_funcs = {'sha1': _sha1_mod.new, 'md5': _md5_mod.new}
SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
_sys_rng = SystemRandom()
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
def safe_str_cmp(a, b):
"""This function compares strings in somewhat constant time. This
requires that the length of at least one string is known in advance.
Returns `True` if the two strings are equal or `False` if they are not.
.. versionadded:: 0.7
"""
if len(a) != len(b):
return False
rv = 0
for x, y in izip(a, b):
rv |= ord(x) ^ ord(y)
return rv == 0
def gen_salt(length):
"""Generate a random string of SALT_CHARS with specified ``length``."""
if length <= 0:
raise ValueError('requested salt of length <= 0')
return ''.join(_sys_rng.choice(SALT_CHARS) for _ in xrange(length))
def _hash_internal(method, salt, password):
"""Internal password hash helper. Supports plaintext without salt,
unsalted and salted passwords. In case salted passwords are used
hmac is used.
"""
if method == 'plain':
return password
if salt:
if method not in _hash_mods:
return None
if isinstance(salt, unicode):
salt = salt.encode('utf-8')
h = hmac.new(salt, None, _hash_mods[method])
else:
if method not in _hash_funcs:
return None
h = _hash_funcs[method]()
if isinstance(password, unicode):
password = password.encode('utf-8')
h.update(password)
return h.hexdigest()
def generate_password_hash(password, method='sha1', salt_length=8):
"""Hash a password with the given method and salt with with a string of
the given length. The format of the string returned includes the method
that was used so that :func:`check_password_hash` can check the hash.
The format for the hashed string looks like this::
method$salt$hash
This method can **not** generate unsalted passwords but it is possible
to set the method to plain to enforce plaintext passwords. If a salt
is used, hmac is used internally to salt the password.
:param password: the password to hash
:param method: the hash method to use (``'md5'`` or ``'sha1'``)
    :param salt_length: the length of the salt in letters
"""
salt = method != 'plain' and gen_salt(salt_length) or ''
h = _hash_internal(method, salt, password)
if h is None:
raise TypeError('invalid method %r' % method)
return '%s$%s$%s' % (method, salt, h)
def check_password_hash(pwhash, password):
"""check a password against a given salted and hashed password value.
In order to support unsalted legacy passwords this method supports
plain text passwords, md5 and sha1 hashes (both salted and unsalted).
Returns `True` if the password matched, `False` otherwise.
:param pwhash: a hashed string like returned by
:func:`generate_password_hash`
:param password: the plaintext password to compare against the hash
"""
if pwhash.count('$') < 2:
return False
method, salt, hashval = pwhash.split('$', 2)
return safe_str_cmp(_hash_internal(method, salt, password), hashval)
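# Illustrative usage sketch (the salt and digest shown are made up to
# illustrate the ``method$salt$hash`` shape, not real output):
#
#   pwhash = generate_password_hash('secret')
#   # -> e.g. 'sha1$AbCdEfGh$5baa61e4c9b93f3f06...'
#   check_password_hash(pwhash, 'secret')   # True
#   check_password_hash(pwhash, 'guess')    # False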
def safe_join(directory, filename):
"""Safely join `directory` and `filename`. If this cannot be done,
this function returns ``None``.
:param directory: the base directory.
:param filename: the untrusted filename relative to that directory.
"""
filename = posixpath.normpath(filename)
for sep in _os_alt_seps:
if sep in filename:
return None
if os.path.isabs(filename) or filename.startswith('../'):
return None
return os.path.join(directory, filename)
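# Illustrative usage sketch (the paths are assumptions):
#
#   safe_join('/var/www/uploads', 'report.txt')      # '/var/www/uploads/report.txt'
#   safe_join('/var/www/uploads', '../etc/passwd')   # None: traversal rejected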
| gpl-3.0 |
openstack/horizon | horizon/__init__.py | 7 | 2185 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" The Horizon interface.
Contains the core Horizon classes--:class:`~horizon.Dashboard` and
:class:`horizon.Panel`--the dynamic URLconf for Horizon, and common interface
methods like :func:`~horizon.register` and :func:`~horizon.unregister`.
"""
# Because this module is compiled by setup.py before Django may be installed
# in the environment we try importing Django and issue a warning but move on
# should that fail.
Horizon = None
try:
from horizon.base import Dashboard
from horizon.base import Horizon
from horizon.base import Panel
from horizon.base import PanelGroup
except ImportError:
import warnings
def simple_warn(message, category, filename, lineno, file=None, line=None):
return '%s: %s' % (category.__name__, message)
msg = ("Could not import Horizon dependencies. "
"This is normal during installation.\n")
warnings.formatwarning = simple_warn
warnings.warn(msg, Warning)
if Horizon:
register = Horizon.register
unregister = Horizon.unregister
get_absolute_url = Horizon.get_absolute_url
get_user_home = Horizon.get_user_home
get_dashboard = Horizon.get_dashboard
get_default_dashboard = Horizon.get_default_dashboard
get_dashboards = Horizon.get_dashboards
urls = Horizon._lazy_urls
# silence flake8 about unused imports here:
__all__ = [
"Dashboard",
"Horizon",
"Panel",
"PanelGroup",
"register",
"unregister",
"get_absolute_url",
"get_user_home",
"get_dashboard",
"get_default_dashboard",
"get_dashboards",
"urls",
]
| apache-2.0 |
glwu/python-for-android | python-modules/twisted/twisted/words/test/test_msn.py | 53 | 20437 | # Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.words.protocols.msn}.
"""
# System imports
import StringIO
# Twisted imports
# t.w.p.msn requires an HTTP client
try:
# So try to get one - do it directly instead of catching an ImportError
# from t.w.p.msn so that other problems which cause that module to fail
# to import don't cause the tests to be skipped.
from twisted.web import client
except ImportError:
# If there isn't one, we're going to skip all the tests.
msn = None
else:
# Otherwise importing it should work, so do it.
from twisted.words.protocols import msn
from twisted.python.hashlib import md5
from twisted.protocols import loopback
from twisted.internet.defer import Deferred
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport, StringIOWithoutClosing
def printError(f):
print f
class PassportTests(unittest.TestCase):
def setUp(self):
self.result = []
self.deferred = Deferred()
self.deferred.addCallback(lambda r: self.result.append(r))
self.deferred.addErrback(printError)
def test_nexus(self):
"""
When L{msn.PassportNexus} receives enough information to identify the
address of the login server, it fires the L{Deferred} passed to its
initializer with that address.
"""
protocol = msn.PassportNexus(self.deferred, 'https://foobar.com/somepage.quux')
headers = {
'Content-Length' : '0',
'Content-Type' : 'text/html',
'PassportURLs' : 'DARealm=Passport.Net,DALogin=login.myserver.com/,DAReg=reg.myserver.com'
}
transport = StringTransport()
protocol.makeConnection(transport)
protocol.dataReceived('HTTP/1.0 200 OK\r\n')
for (h, v) in headers.items():
protocol.dataReceived('%s: %s\r\n' % (h,v))
protocol.dataReceived('\r\n')
self.assertEquals(self.result[0], "https://login.myserver.com/")
def _doLoginTest(self, response, headers):
protocol = msn.PassportLogin(self.deferred,'[email protected]','testpass','https://foo.com/', 'a')
protocol.makeConnection(StringTransport())
protocol.dataReceived(response)
for (h,v) in headers.items(): protocol.dataReceived('%s: %s\r\n' % (h,v))
protocol.dataReceived('\r\n')
def testPassportLoginSuccess(self):
headers = {
'Content-Length' : '0',
'Content-Type' : 'text/html',
'Authentication-Info' : "Passport1.4 da-status=success,tname=MSPAuth," +
"tname=MSPProf,tname=MSPSec,from-PP='somekey'," +
"ru=http://messenger.msn.com"
}
self._doLoginTest('HTTP/1.1 200 OK\r\n', headers)
self.failUnless(self.result[0] == (msn.LOGIN_SUCCESS, 'somekey'))
def testPassportLoginFailure(self):
headers = {
'Content-Type' : 'text/html',
'WWW-Authenticate' : 'Passport1.4 da-status=failed,' +
'srealm=Passport.NET,ts=-3,prompt,cburl=http://host.com,' +
'cbtxt=the%20error%20message'
}
self._doLoginTest('HTTP/1.1 401 Unauthorized\r\n', headers)
self.failUnless(self.result[0] == (msn.LOGIN_FAILURE, 'the error message'))
def testPassportLoginRedirect(self):
headers = {
'Content-Type' : 'text/html',
'Authentication-Info' : 'Passport1.4 da-status=redir',
'Location' : 'https://newlogin.host.com/'
}
self._doLoginTest('HTTP/1.1 302 Found\r\n', headers)
self.failUnless(self.result[0] == (msn.LOGIN_REDIRECT, 'https://newlogin.host.com/', 'a'))
if msn is not None:
class DummySwitchboardClient(msn.SwitchboardClient):
def userTyping(self, message):
self.state = 'TYPING'
def gotSendRequest(self, fileName, fileSize, cookie, message):
if fileName == 'foobar.ext' and fileSize == 31337 and cookie == 1234: self.state = 'INVITATION'
class DummyNotificationClient(msn.NotificationClient):
def loggedIn(self, userHandle, screenName, verified):
if userHandle == '[email protected]' and screenName == 'Test Screen Name' and verified:
self.state = 'LOGIN'
def gotProfile(self, message):
self.state = 'PROFILE'
def gotContactStatus(self, code, userHandle, screenName):
if code == msn.STATUS_AWAY and userHandle == "[email protected]" and screenName == "Test Screen Name":
self.state = 'INITSTATUS'
def contactStatusChanged(self, code, userHandle, screenName):
if code == msn.STATUS_LUNCH and userHandle == "[email protected]" and screenName == "Test Name":
self.state = 'NEWSTATUS'
def contactOffline(self, userHandle):
if userHandle == "[email protected]": self.state = 'OFFLINE'
def statusChanged(self, code):
if code == msn.STATUS_HIDDEN: self.state = 'MYSTATUS'
def listSynchronized(self, *args):
self.state = 'GOTLIST'
def gotPhoneNumber(self, listVersion, userHandle, phoneType, number):
msn.NotificationClient.gotPhoneNumber(self, listVersion, userHandle, phoneType, number)
self.state = 'GOTPHONE'
def userRemovedMe(self, userHandle, listVersion):
msn.NotificationClient.userRemovedMe(self, userHandle, listVersion)
c = self.factory.contacts.getContact(userHandle)
if not c and self.factory.contacts.version == listVersion: self.state = 'USERREMOVEDME'
def userAddedMe(self, userHandle, screenName, listVersion):
msn.NotificationClient.userAddedMe(self, userHandle, screenName, listVersion)
c = self.factory.contacts.getContact(userHandle)
if c and (c.lists | msn.REVERSE_LIST) and (self.factory.contacts.version == listVersion) and \
(screenName == 'Screen Name'):
self.state = 'USERADDEDME'
def gotSwitchboardInvitation(self, sessionID, host, port, key, userHandle, screenName):
if sessionID == 1234 and \
host == '192.168.1.1' and \
port == 1863 and \
key == '123.456' and \
userHandle == '[email protected]' and \
screenName == 'Screen Name':
self.state = 'SBINVITED'
class DispatchTests(unittest.TestCase):
"""
Tests for L{DispatchClient}.
"""
def _versionTest(self, serverVersionResponse):
"""
Test L{DispatchClient} version negotiation.
"""
client = msn.DispatchClient()
client.userHandle = "foo"
transport = StringTransport()
client.makeConnection(transport)
self.assertEquals(
transport.value(), "VER 1 MSNP8 CVR0\r\n")
transport.clear()
client.dataReceived(serverVersionResponse)
self.assertEquals(
transport.value(),
"CVR 2 0x0409 win 4.10 i386 MSNMSGR 5.0.0544 MSMSGS foo\r\n")
def test_version(self):
"""
L{DispatchClient.connectionMade} greets the server with a I{VER}
(version) message and then L{NotificationClient.dataReceived}
handles the server's I{VER} response by sending a I{CVR} (client
version) message.
"""
self._versionTest("VER 1 MSNP8 CVR0\r\n")
def test_versionWithoutCVR0(self):
"""
If the server responds to a I{VER} command without including the
I{CVR0} protocol, L{DispatchClient} behaves in the same way as if
that protocol were included.
Starting in August 2008, CVR0 disappeared from the I{VER} response.
"""
self._versionTest("VER 1 MSNP8\r\n")
class NotificationTests(unittest.TestCase):
""" testing the various events in NotificationClient """
def setUp(self):
self.client = DummyNotificationClient()
self.client.factory = msn.NotificationFactory()
self.client.state = 'START'
def tearDown(self):
self.client = None
def _versionTest(self, serverVersionResponse):
"""
Test L{NotificationClient} version negotiation.
"""
self.client.factory.userHandle = "foo"
transport = StringTransport()
self.client.makeConnection(transport)
self.assertEquals(
transport.value(), "VER 1 MSNP8 CVR0\r\n")
transport.clear()
self.client.dataReceived(serverVersionResponse)
self.assertEquals(
transport.value(),
"CVR 2 0x0409 win 4.10 i386 MSNMSGR 5.0.0544 MSMSGS foo\r\n")
def test_version(self):
"""
L{NotificationClient.connectionMade} greets the server with a I{VER}
(version) message and then L{NotificationClient.dataReceived}
handles the server's I{VER} response by sending a I{CVR} (client
version) message.
"""
self._versionTest("VER 1 MSNP8 CVR0\r\n")
def test_versionWithoutCVR0(self):
"""
If the server responds to a I{VER} command without including the
I{CVR0} protocol, L{NotificationClient} behaves in the same way as
if that protocol were included.
Starting in August 2008, CVR0 disappeared from the I{VER} response.
"""
self._versionTest("VER 1 MSNP8\r\n")
def test_challenge(self):
"""
L{NotificationClient} responds to a I{CHL} message by sending a I{QRY}
back which included a hash based on the parameters of the I{CHL}.
"""
transport = StringTransport()
self.client.makeConnection(transport)
transport.clear()
challenge = "15570131571988941333"
self.client.dataReceived('CHL 0 ' + challenge + '\r\n')
# md5 of the challenge and a magic string defined by the protocol
response = "8f2f5a91b72102cd28355e9fc9000d6e"
# Sanity check - the response is what the comment above says it is.
self.assertEquals(
response, md5(challenge + "Q1P7W2E4J9R8U3S5").hexdigest())
self.assertEquals(
transport.value(),
# 2 is the next transaction identifier. 32 is the length of the
# response.
"QRY 2 [email protected] 32\r\n" + response)
def testLogin(self):
self.client.lineReceived('USR 1 OK [email protected] Test%20Screen%20Name 1 0')
self.failUnless((self.client.state == 'LOGIN'), msg='Failed to detect successful login')
def testProfile(self):
m = 'MSG Hotmail Hotmail 353\r\nMIME-Version: 1.0\r\nContent-Type: text/x-msmsgsprofile; charset=UTF-8\r\n'
m += 'LoginTime: 1016941010\r\nEmailEnabled: 1\r\nMemberIdHigh: 40000\r\nMemberIdLow: -600000000\r\nlang_preference: 1033\r\n'
m += 'preferredEmail: [email protected]\r\ncountry: AU\r\nPostalCode: 90210\r\nGender: M\r\nKid: 0\r\nAge:\r\nsid: 400\r\n'
m += 'kv: 2\r\nMSPAuth: 2CACCBCCADMoV8ORoz64BVwmjtksIg!kmR!Rj5tBBqEaW9hc4YnPHSOQ$$\r\n\r\n'
map(self.client.lineReceived, m.split('\r\n')[:-1])
self.failUnless((self.client.state == 'PROFILE'), msg='Failed to detect initial profile')
def testStatus(self):
t = [('ILN 1 AWY [email protected] Test%20Screen%20Name 0', 'INITSTATUS', 'Failed to detect initial status report'),
('NLN LUN [email protected] Test%20Name 0', 'NEWSTATUS', 'Failed to detect contact status change'),
('FLN [email protected]', 'OFFLINE', 'Failed to detect contact signing off'),
('CHG 1 HDN 0', 'MYSTATUS', 'Failed to detect my status changing')]
for i in t:
self.client.lineReceived(i[0])
self.failUnless((self.client.state == i[1]), msg=i[2])
def testListSync(self):
# currently this test does not take into account the fact
# that BPRs sent as part of the SYN reply may not be interpreted
# as such if they are for the last LST -- maybe I should
# factor this in later.
self.client.makeConnection(StringTransport())
msn.NotificationClient.loggedIn(self.client, '[email protected]', 'foobar', 1)
lines = [
"SYN %s 100 1 1" % self.client.currentID,
"GTC A",
"BLP AL",
"LSG 0 Other%20Contacts 0",
"LST [email protected] Some%20Name 11 0"
]
map(self.client.lineReceived, lines)
contacts = self.client.factory.contacts
contact = contacts.getContact('[email protected]')
self.failUnless(contacts.version == 100, "Invalid contact list version")
self.failUnless(contact.screenName == 'Some Name', "Invalid screen-name for user")
self.failUnless(contacts.groups == {0 : 'Other Contacts'}, "Did not get proper group list")
self.failUnless(contact.groups == [0] and contact.lists == 11, "Invalid contact list/group info")
self.failUnless(self.client.state == 'GOTLIST', "Failed to call list sync handler")
def testAsyncPhoneChange(self):
c = msn.MSNContact(userHandle='[email protected]')
self.client.factory.contacts = msn.MSNContactList()
self.client.factory.contacts.addContact(c)
self.client.makeConnection(StringTransport())
self.client.lineReceived("BPR 101 [email protected] PHH 123%20456")
c = self.client.factory.contacts.getContact('[email protected]')
self.failUnless(self.client.state == 'GOTPHONE', "Did not fire phone change callback")
self.failUnless(c.homePhone == '123 456', "Did not update the contact's phone number")
self.failUnless(self.client.factory.contacts.version == 101, "Did not update list version")
def testLateBPR(self):
"""
This test makes sure that if a BPR response that was meant
to be part of a SYN response (but came after the last LST)
is received, the correct contact is updated and all is well
"""
self.client.makeConnection(StringTransport())
msn.NotificationClient.loggedIn(self.client, '[email protected]', 'foo', 1)
lines = [
"SYN %s 100 1 1" % self.client.currentID,
"GTC A",
"BLP AL",
"LSG 0 Other%20Contacts 0",
"LST [email protected] Some%20Name 11 0",
"BPR PHH 123%20456"
]
map(self.client.lineReceived, lines)
contact = self.client.factory.contacts.getContact('[email protected]')
self.failUnless(contact.homePhone == '123 456', "Did not update contact's phone number")
def testUserRemovedMe(self):
self.client.factory.contacts = msn.MSNContactList()
contact = msn.MSNContact(userHandle='[email protected]')
contact.addToList(msn.REVERSE_LIST)
self.client.factory.contacts.addContact(contact)
self.client.lineReceived("REM 0 RL 100 [email protected]")
self.failUnless(self.client.state == 'USERREMOVEDME', "Failed to remove user from reverse list")
def testUserAddedMe(self):
self.client.factory.contacts = msn.MSNContactList()
self.client.lineReceived("ADD 0 RL 100 [email protected] Screen%20Name")
        self.failUnless(self.client.state == 'USERADDEDME', "Failed to add user to reverse list")
def testAsyncSwitchboardInvitation(self):
self.client.lineReceived("RNG 1234 192.168.1.1:1863 CKI 123.456 [email protected] Screen%20Name")
self.failUnless(self.client.state == "SBINVITED")
def testCommandFailed(self):
"""
Ensures that error responses from the server fires an errback with
MSNCommandFailed.
"""
id, d = self.client._createIDMapping()
self.client.lineReceived("201 %s" % id)
d = self.assertFailure(d, msn.MSNCommandFailed)
def assertErrorCode(exception):
self.assertEqual(201, exception.errorCode)
return d.addCallback(assertErrorCode)
class MessageHandlingTests(unittest.TestCase):
""" testing various message handling methods from SwichboardClient """
def setUp(self):
self.client = DummySwitchboardClient()
self.client.state = 'START'
def tearDown(self):
self.client = None
def testClientCapabilitiesCheck(self):
m = msn.MSNMessage()
m.setHeader('Content-Type', 'text/x-clientcaps')
self.assertEquals(self.client.checkMessage(m), 0, 'Failed to detect client capability message')
def testTypingCheck(self):
m = msn.MSNMessage()
m.setHeader('Content-Type', 'text/x-msmsgscontrol')
m.setHeader('TypingUser', 'foo@bar')
self.client.checkMessage(m)
self.failUnless((self.client.state == 'TYPING'), msg='Failed to detect typing notification')
def testFileInvitation(self, lazyClient=False):
m = msn.MSNMessage()
m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8')
m.message += 'Application-Name: File Transfer\r\n'
if not lazyClient:
m.message += 'Application-GUID: {5D3E02AB-6190-11d3-BBBB-00C04F795683}\r\n'
m.message += 'Invitation-Command: Invite\r\n'
m.message += 'Invitation-Cookie: 1234\r\n'
m.message += 'Application-File: foobar.ext\r\n'
m.message += 'Application-FileSize: 31337\r\n\r\n'
self.client.checkMessage(m)
self.failUnless((self.client.state == 'INVITATION'), msg='Failed to detect file transfer invitation')
def testFileInvitationMissingGUID(self):
return self.testFileInvitation(True)
def testFileResponse(self):
d = Deferred()
d.addCallback(self.fileResponse)
self.client.cookies['iCookies'][1234] = (d, None)
m = msn.MSNMessage()
m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8')
m.message += 'Invitation-Command: ACCEPT\r\n'
m.message += 'Invitation-Cookie: 1234\r\n\r\n'
self.client.checkMessage(m)
self.failUnless((self.client.state == 'RESPONSE'), msg='Failed to detect file transfer response')
def testFileInfo(self):
d = Deferred()
d.addCallback(self.fileInfo)
self.client.cookies['external'][1234] = (d, None)
m = msn.MSNMessage()
m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8')
m.message += 'Invitation-Command: ACCEPT\r\n'
m.message += 'Invitation-Cookie: 1234\r\n'
m.message += 'IP-Address: 192.168.0.1\r\n'
m.message += 'Port: 6891\r\n'
m.message += 'AuthCookie: 4321\r\n\r\n'
self.client.checkMessage(m)
self.failUnless((self.client.state == 'INFO'), msg='Failed to detect file transfer info')
def fileResponse(self, (accept, cookie, info)):
if accept and cookie == 1234: self.client.state = 'RESPONSE'
def fileInfo(self, (accept, ip, port, aCookie, info)):
if accept and ip == '192.168.0.1' and port == 6891 and aCookie == 4321: self.client.state = 'INFO'
class FileTransferTestCase(unittest.TestCase):
"""
test FileSend against FileReceive
"""
def setUp(self):
self.input = 'a' * 7000
self.output = StringIOWithoutClosing()
def tearDown(self):
self.input = None
self.output = None
def test_fileTransfer(self):
"""
Test L{FileSend} against L{FileReceive} using a loopback transport.
"""
auth = 1234
sender = msn.FileSend(StringIO.StringIO(self.input))
sender.auth = auth
sender.fileSize = 7000
client = msn.FileReceive(auth, "[email protected]", self.output)
client.fileSize = 7000
def check(ignored):
self.assertTrue(
client.completed and sender.completed,
msg="send failed to complete")
self.assertEqual(
self.input, self.output.getvalue(),
msg="saved file does not match original")
d = loopback.loopbackAsync(sender, client)
d.addCallback(check)
return d
if msn is None:
for testClass in [PassportTests, NotificationTests,
MessageHandlingTests, FileTransferTestCase]:
testClass.skip = (
"MSN requires an HTTP client but none is available, "
"skipping tests.")
| apache-2.0 |
ayoubg/gem5-graphics | gem5/src/dev/Uart.py | 66 | 1976 | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice
class Uart(BasicPioDevice):
type = 'Uart'
abstract = True
cxx_header = "dev/uart.hh"
platform = Param.Platform(Parent.any, "Platform this device is part of.")
terminal = Param.Terminal(Parent.any, "The terminal")
class Uart8250(Uart):
type = 'Uart8250'
cxx_header = "dev/uart8250.hh"
| bsd-3-clause |
r0qs/chubby | Fonte/Menu_example.py | 1 | 1250 | from Game import *
from Menu import *
def new_game_function():
game_main()
def option_function():
main()
def main():
width = 1024
height = 768
pygame.display.init()
menu_screen = pygame.display.set_mode((width,height))
# Background
background = pygame.image.load(os.path.join('', 'images', 'menu_bg.jpg'))
background = background.convert()
# Cursor
pygame.mouse.set_visible(False)
cursor = Cursor(16,16,'images/cursor.png')
#Options in menu
new_game = Option(200,200,173,89,'images/little.png','images/big.png',new_game_function, 1.42)
# Menu
menu = Menu()
menu.append(new_game)
menu_screen.blit(background, (0, 0))
cursor.draw(menu_screen)
menu.draw(menu_screen)
pygame.display.flip()
# Event loop
while 1:
menu.update(cursor)
for event in pygame.event.get():
if event.type == QUIT:
pygame.display.quit()
sys.exit()
if event.type == KEYDOWN and event.key == K_ESCAPE:
running = False
return
elif event.type == MOUSEMOTION:
pygame.mouse.get_pos()
cursor.update()
elif event.type == MOUSEBUTTONDOWN:
menu.activate()
menu_screen.blit(background, (0, 0))
menu.draw(menu_screen)
cursor.draw(menu_screen)
pygame.display.flip()
if __name__ == '__main__': main()
| gpl-3.0 |
prashantv/thrift | tutorial/py.tornado/PythonClient.py | 68 | 3152 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import glob
sys.path.append('gen-py.tornado')
sys.path.insert(0, glob.glob('../../lib/py/build/lib.*')[0])
import logging
from tutorial import Calculator
from tutorial.ttypes import Operation, Work, InvalidOperation
from thrift import TTornado
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from tornado import gen
from tornado import ioloop
@gen.engine
def communicate(callback=None):
# create client
transport = TTornado.TTornadoStreamTransport('localhost', 9090)
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
client = Calculator.Client(transport, pfactory)
# open the transport, bail on error
try:
yield gen.Task(transport.open)
except TTransport.TTransportException as ex:
logging.error(ex)
if callback:
callback()
return
# ping
yield gen.Task(client.ping)
print "ping()"
# add
sum_ = yield gen.Task(client.add, 1, 1)
print "1 + 1 = {}".format(sum_)
# make a oneway call without a callback (schedule the write and continue
# without blocking)
client.zip()
print "zip() without callback"
# make a oneway call with a callback (we'll wait for the stream write to
# complete before continuing)
yield gen.Task(client.zip)
print "zip() with callback"
# calculate 1/0
work = Work()
work.op = Operation.DIVIDE
work.num1 = 1
work.num2 = 0
try:
quotient = yield gen.Task(client.calculate, 1, work)
print "Whoa? You know how to divide by zero?"
except InvalidOperation as io:
print "InvalidOperation: {}".format(io)
# calculate 15-10
work.op = Operation.SUBTRACT
work.num1 = 15
work.num2 = 10
diff = yield gen.Task(client.calculate, 1, work)
print "15 - 10 = {}".format(diff)
# getStruct
log = yield gen.Task(client.getStruct, 1)
print "Check log: {}".format(log.value)
# close the transport
client._transport.close()
if callback:
callback()
def main():
# create an ioloop, do the above, then stop
io_loop = ioloop.IOLoop.instance()
def this_joint():
communicate(callback=io_loop.stop)
io_loop.add_callback(this_joint)
io_loop.start()
if __name__ == "__main__":
main()
| apache-2.0 |
zhujzhuo/Sahara | sahara/utils/openstack/heat.py | 6 | 2155 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from heatclient import client as heat_client
from oslo_config import cfg
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.utils.openstack import base
opts = [
cfg.BoolOpt('api_insecure',
default=False,
help='Allow to perform insecure SSL requests to heat.'),
cfg.StrOpt('ca_file',
help='Location of ca certificates file to use for heat '
'client requests.')
]
heat_group = cfg.OptGroup(name='heat',
title='Heat client options')
CONF = cfg.CONF
CONF.register_group(heat_group)
CONF.register_opts(opts, group=heat_group)
def client():
ctx = context.current()
heat_url = base.url_for(ctx.service_catalog, 'orchestration')
return heat_client.Client('1', heat_url, token=ctx.auth_token,
cert_file=CONF.heat.ca_file,
insecure=CONF.heat.api_insecure)
def get_stack(stack_name):
heat = client()
for stack in heat.stacks.list():
if stack.stack_name == stack_name:
return stack
raise ex.NotFoundException(_('Failed to find stack %(stack)s')
% {'stack': stack_name})
def wait_stack_completion(stack):
# NOTE: an empty status is also expected because the stack
# status may not yet be set in the Heat database
while stack.status in ['IN_PROGRESS', '']:
context.sleep(1)
stack.get()
if stack.status != 'COMPLETE':
raise ex.HeatStackException(stack.stack_status)
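# Illustrative sketch: hypothetical use of the helpers above from request
# context -- look up a stack by name and block until Heat reports a terminal
# status.  The stack name below is made up for illustration.
#
#     stack = get_stack("sahara-cluster-stack")
#     wait_stack_completion(stack)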
| apache-2.0 |
nicky-ji/edx-nicky | lms/djangoapps/courseware/models.py | 35 | 9221 | """
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py schemamigration courseware --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/courseware/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
"""
from django.contrib.auth.models import User
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from xmodule_django.models import CourseKeyField, LocationKeyField
class StudentModule(models.Model):
"""
Keeps student state for a particular module in a particular course.
"""
MODEL_TAGS = ['course_id', 'module_type']
# For a homework problem, contains a JSON
# object consisting of state
MODULE_TYPES = (('problem', 'problem'),
('video', 'video'),
('html', 'html'),
)
## These three are the key for the object
module_type = models.CharField(max_length=32, choices=MODULE_TYPES, default='problem', db_index=True)
# Key used to share state. By default, this is the module_id,
# but for abtests and the like, this can be set to a shared value
# for many instances of the module.
# Filename for homeworks, etc.
module_state_key = LocationKeyField(max_length=255, db_index=True, db_column='module_id')
student = models.ForeignKey(User, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
class Meta:
unique_together = (('student', 'module_state_key', 'course_id'),)
## Internal state of the object
state = models.TextField(null=True, blank=True)
## Grade, and are we done?
grade = models.FloatField(null=True, blank=True, db_index=True)
max_grade = models.FloatField(null=True, blank=True)
DONE_TYPES = (('na', 'NOT_APPLICABLE'),
('f', 'FINISHED'),
('i', 'INCOMPLETE'),
)
done = models.CharField(max_length=8, choices=DONE_TYPES, default='na', db_index=True)
created = models.DateTimeField(auto_now_add=True, db_index=True)
modified = models.DateTimeField(auto_now=True, db_index=True)
@classmethod
def all_submitted_problems_read_only(cls, course_id):
"""
Return all model instances that correspond to problems that have been
submitted for a given course. So module_type='problem' and a non-null
grade. Use a read replica if one exists for this environment.
"""
queryset = cls.objects.filter(
course_id=course_id,
module_type='problem',
grade__isnull=False
)
if "read_replica" in settings.DATABASES:
return queryset.using("read_replica")
else:
return queryset
def __repr__(self):
return 'StudentModule<%r>' % ({
'course_id': self.course_id,
'module_type': self.module_type,
'student': self.student.username,
'module_state_key': self.module_state_key,
'state': str(self.state)[:20],
},)
def __unicode__(self):
return unicode(repr(self))
class StudentModuleHistory(models.Model):
"""Keeps a complete history of state changes for a given XModule for a given
Student. Right now, we restrict this to problems so that the table doesn't
explode in size."""
HISTORY_SAVING_TYPES = {'problem'}
class Meta:
get_latest_by = "created"
student_module = models.ForeignKey(StudentModule, db_index=True)
version = models.CharField(max_length=255, null=True, blank=True, db_index=True)
# This should be populated from the modified field in StudentModule
created = models.DateTimeField(db_index=True)
state = models.TextField(null=True, blank=True)
grade = models.FloatField(null=True, blank=True)
max_grade = models.FloatField(null=True, blank=True)
@receiver(post_save, sender=StudentModule)
def save_history(sender, instance, **kwargs): # pylint: disable=no-self-argument, unused-argument
"""
Checks the instance's module_type, and creates & saves a
StudentModuleHistory entry if the module_type is one that
we save.
"""
if instance.module_type in StudentModuleHistory.HISTORY_SAVING_TYPES:
history_entry = StudentModuleHistory(student_module=instance,
version=None,
created=instance.modified,
state=instance.state,
grade=instance.grade,
max_grade=instance.max_grade)
history_entry.save()
class XModuleUserStateSummaryField(models.Model):
"""
Stores data set in the Scope.user_state_summary scope by an xmodule field
"""
class Meta:
unique_together = (('usage_id', 'field_name'),)
# The name of the field
field_name = models.CharField(max_length=64, db_index=True)
# The definition id for the module
usage_id = LocationKeyField(max_length=255, db_index=True)
# The value of the field. Defaults to None dumped as json
value = models.TextField(default='null')
created = models.DateTimeField(auto_now_add=True, db_index=True)
modified = models.DateTimeField(auto_now=True, db_index=True)
def __repr__(self):
return 'XModuleUserStateSummaryField<%r>' % ({
'field_name': self.field_name,
'usage_id': self.usage_id,
'value': self.value,
},)
def __unicode__(self):
return unicode(repr(self))
class XModuleStudentPrefsField(models.Model):
"""
Stores data set in the Scope.preferences scope by an xmodule field
"""
class Meta:
unique_together = (('student', 'module_type', 'field_name'),)
# The name of the field
field_name = models.CharField(max_length=64, db_index=True)
# The type of the module for these preferences
module_type = models.CharField(max_length=64, db_index=True)
# The value of the field. Defaults to None dumped as json
value = models.TextField(default='null')
student = models.ForeignKey(User, db_index=True)
created = models.DateTimeField(auto_now_add=True, db_index=True)
modified = models.DateTimeField(auto_now=True, db_index=True)
def __repr__(self):
return 'XModuleStudentPrefsField<%r>' % ({
'field_name': self.field_name,
'module_type': self.module_type,
'student': self.student.username,
'value': self.value,
},)
def __unicode__(self):
return unicode(repr(self))
class XModuleStudentInfoField(models.Model):
"""
Stores data set in the Scope.preferences scope by an xmodule field
"""
class Meta:
unique_together = (('student', 'field_name'),)
# The name of the field
field_name = models.CharField(max_length=64, db_index=True)
# The value of the field. Defaults to None dumped as json
value = models.TextField(default='null')
student = models.ForeignKey(User, db_index=True)
created = models.DateTimeField(auto_now_add=True, db_index=True)
modified = models.DateTimeField(auto_now=True, db_index=True)
def __repr__(self):
return 'XModuleStudentInfoField<%r>' % ({
'field_name': self.field_name,
'student': self.student.username,
'value': self.value,
},)
def __unicode__(self):
return unicode(repr(self))
class OfflineComputedGrade(models.Model):
"""
Table of grades computed offline for a given user and course.
"""
user = models.ForeignKey(User, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
updated = models.DateTimeField(auto_now=True, db_index=True)
gradeset = models.TextField(null=True, blank=True) # grades, stored as JSON
class Meta:
unique_together = (('user', 'course_id'), )
def __unicode__(self):
return "[OfflineComputedGrade] %s: %s (%s) = %s" % (self.user, self.course_id, self.created, self.gradeset)
class OfflineComputedGradeLog(models.Model):
"""
Log of when offline grades are computed.
Use this to be able to show instructor when the last computed grades were done.
"""
class Meta:
ordering = ["-created"]
get_latest_by = "created"
course_id = CourseKeyField(max_length=255, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
seconds = models.IntegerField(default=0) # seconds elapsed for computation
nstudents = models.IntegerField(default=0)
def __unicode__(self):
return "[OCGLog] %s: %s" % (self.course_id.to_deprecated_string(), self.created) # pylint: disable=no-member
| agpl-3.0 |
WangWenjun559/Weiss | summary/sumy/sklearn/metrics/pairwise.py | 1 | 42672 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
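# Illustrative sketch: a hypothetical helper that checks the dot-product
# expansion quoted in the docstring above against a direct NumPy computation
# for one pair of small vectors.  Defined for exposition only, never called.
def _euclidean_expansion_demo():
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([2.0, 1.0, 0.0])
    direct = np.sqrt(((x - y) ** 2).sum())
    expanded = np.sqrt(np.dot(x, x) - 2 * np.dot(x, y) + np.dot(y, y))
    assert np.allclose(direct, expanded)
    # euclidean_distances expects 2D inputs, one row per sample.
    assert np.allclose(euclidean_distances([x], [y])[0, 0], direct)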
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
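# Illustrative sketch: a hypothetical check of the equivalence stated in the
# docstring above -- the chunked argmin/min should agree with a naive argmin
# over the full pairwise distance matrix.  Defined for exposition only.
def _argmin_min_equivalence_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    Y = rng.rand(7, 3)
    idx, dist = pairwise_distances_argmin_min(X, Y)
    full = pairwise_distances(X, Y)
    assert np.array_equal(idx, full.argmin(axis=1))
    assert np.allclose(dist, full.min(axis=1))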
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
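# Illustrative sketch: a hypothetical worked example of the formula
# K(X, Y) = (gamma <X, Y> + coef0)^degree from the docstring above, using the
# default degree=3, coef0=1 and gamma=1/n_features.  Never called here.
def _polynomial_kernel_demo():
    X = np.array([[1.0, 2.0]])
    Y = np.array([[3.0, 4.0]])
    # <X, Y> = 1*3 + 2*4 = 11 and gamma = 1/2, so K = (0.5*11 + 1)^3 = 274.625
    assert np.allclose(polynomial_kernel(X, Y), [[274.625]])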
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
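# Illustrative sketch: a hypothetical worked example of
# K(x, y) = exp(-gamma ||x-y||^2) from the docstring above.  With gamma=0.5
# and ||x-y||^2 = 2 the kernel value is exp(-1).  Never called here.
def _rbf_kernel_demo():
    X = np.array([[0.0, 0.0]])
    Y = np.array([[1.0, 1.0]])
    assert np.allclose(rbf_kernel(X, Y, gamma=0.5), [[np.exp(-1.0)]])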
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=True)
return K
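# Illustrative sketch: as the docstring above notes, on L2-normalized rows
# cosine_similarity reduces to linear_kernel.  Hypothetical check, defined
# for exposition only and never called.
def _cosine_similarity_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    X_normalized = normalize(X)
    assert np.allclose(cosine_similarity(X),
                       linear_kernel(X_normalized, X_normalized))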
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
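# Illustrative sketch: hypothetical use of pairwise_distances with a built-in
# metric and with a user-supplied callable, mirroring the docstring above.
# The callable receives two rows of X and returns a scalar.  Never called.
def _pairwise_distances_demo():
    X = np.array([[0.0, 1.0], [1.0, 1.0]])
    D_builtin = pairwise_distances(X, metric="manhattan")
    D_callable = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
    assert np.allclose(D_builtin, D_callable)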
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
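# Illustrative sketch: hypothetical use of pairwise_kernels with
# filter_params=True -- keyword arguments that the chosen kernel does not
# accept (here "degree" for the RBF kernel) are dropped.  Never called here.
def _pairwise_kernels_demo():
    X = np.array([[0.0, 1.0], [1.0, 1.0]])
    K = pairwise_kernels(X, metric="rbf", filter_params=True,
                         gamma=0.5, degree=4)
    assert np.allclose(K, rbf_kernel(X, gamma=0.5))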
| apache-2.0 |
zottejos/merelcoin | qa/rpc-tests/bipdersig.py | 136 | 3261 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test the BIP66 changeover logic
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
class BIP66Test(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, []))
self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=2"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=3"]))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some old-version blocks
self.nodes[1].setgenerate(True, 100)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 100):
raise AssertionError("Failed to mine 100 version=2 blocks")
# Mine 750 new-version blocks
for i in xrange(15):
self.nodes[2].setgenerate(True, 50)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 850):
raise AssertionError("Failed to mine 750 version=3 blocks")
# TODO: check that new DERSIG rules are not enforced
# Mine 1 new-version block
self.nodes[2].setgenerate(True, 1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 851):
raise AssertionError("Failed to mine a version=3 block")
# TODO: check that new DERSIG rules are enforced
# Mine 198 new-version blocks
for i in xrange(2):
self.nodes[2].setgenerate(True, 99)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1049):
raise AssertionError("Failed to mine 198 version=3 blocks")
# Mine 1 old-version block
self.nodes[1].setgenerate(True, 1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1050):
raise AssertionError("Failed to mine a version=2 block after 949 version=3 blocks")
# Mine 1 new-version block
self.nodes[2].setgenerate(True, 1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Failed to mine a version=3 block")
# Mine 1 old-version block
try:
self.nodes[1].setgenerate(True, 1)
raise AssertionError("Succeeded to mine a version=2 block after 950 version=3 blocks")
except JSONRPCException:
pass
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Accepted a version=2 block after 950 version=3 blocks")
# Mine 1 new-version block
self.nodes[2].setgenerate(True, 1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1052):
raise AssertionError("Failed to mine a version=3 block")
if __name__ == '__main__':
BIP66Test().main()
| mit |
OpenRCE/sulley | sulley/sessions.py | 1 | 48671 | import os
import re
import sys
import zlib
import time
import socket
import httplib
import cPickle
import threading
import BaseHTTPServer
import httplib
import logging
import blocks
import pedrpc
import pgraph
import sex
import primitives
########################################################################################################################
class target:
'''
Target descriptor container.
'''
def __init__ (self, host, port, **kwargs):
'''
@type host: String
@param host: Hostname or IP address of target system
@type port: Integer
@param port: Port of target service
'''
self.host = host
self.port = port
# set these manually once target is instantiated.
self.netmon = None
self.procmon = None
self.vmcontrol = None
self.netmon_options = {}
self.procmon_options = {}
self.vmcontrol_options = {}
def pedrpc_connect (self):
'''
Pass specified target parameters to the PED-RPC server.
'''
# If the process monitor is alive, set its options
if self.procmon:
while 1:
try:
if self.procmon.alive():
break
except:
pass
time.sleep(1)
# connection established.
for key in self.procmon_options.keys():
eval('self.procmon.set_%s(self.procmon_options["%s"])' % (key, key))
# If the network monitor is alive, set it's options
if self.netmon:
while 1:
try:
if self.netmon.alive():
break
except:
pass
time.sleep(1)
# connection established.
for key in self.netmon_options.keys():
eval('self.netmon.set_%s(self.netmon_options["%s"])' % (key, key))
########################################################################################################################
class connection (pgraph.edge.edge):
def __init__ (self, src, dst, callback=None):
'''
Extends pgraph.edge with a callback option. This allows us to register a function to call between node
transmissions to implement functionality such as challenge response systems. The callback method must follow
this prototype::
def callback(session, node, edge, sock)
Where node is the node about to be sent, edge is the last edge along the current fuzz path to "node", session
is a pointer to the session instance which is useful for snagging data such as session.last_recv which contains
the data returned from the last socket transmission and sock is the live socket. A callback is also useful in
situations where, for example, the size of the next packet is specified in the first packet.
@type src: Integer
@param src: Edge source ID
@type dst: Integer
@param dst: Edge destination ID
@type callback: Function
@param callback: (Optional, def=None) Callback function to pass received data to between node xmits
'''
# run the parent classes initialization routine first.
pgraph.edge.edge.__init__(self, src, dst)
self.callback = callback
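# Illustrative callback skeleton (sketch only) matching the prototype documented
# above; session.last_recv holds the data returned by the previous transmission:
#   def my_callback(session, node, edge, sock):
#       challenge = session.last_recv
#       # ... adjust "node" based on the challenge before it is transmitted ...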
########################################################################################################################
class session (pgraph.graph):
def __init__(
self,
session_filename=None,
skip=0,
sleep_time=1.0,
log_level=logging.INFO,
logfile=None,
logfile_level=logging.DEBUG,
proto="tcp",
bind=None,
restart_interval=0,
timeout=5.0,
web_port=26000,
crash_threshold=3,
restart_sleep_time=300
):
'''
Extends pgraph.graph and provides a container for architecting protocol dialogs.
@type session_filename: String
@kwarg session_filename: (Optional, def=None) Filename to serialize persistant data to
@type skip: Integer
@kwarg skip: (Optional, def=0) Number of test cases to skip
@type sleep_time: Float
@kwarg sleep_time: (Optional, def=1.0) Time to sleep in between tests
@type log_level: Integer
@kwarg log_level: (Optional, def=logging.INFO) Set the log level
@type logfile: String
@kwarg logfile: (Optional, def=None) Name of log file
@type logfile_level: Integer
@kwarg logfile_level: (Optional, def=logging.DEBUG) Set the log level for the logfile
@type proto: String
@kwarg proto: (Optional, def="tcp") Communication protocol ("tcp", "udp", "ssl")
@type bind: Tuple (host, port)
@kwarg bind: (Optional, def=random) Socket bind address and port
@type timeout: Float
@kwarg timeout: (Optional, def=5.0) Seconds to wait for a send/recv prior to timing out
@type restart_interval: Integer
@kwarg restart_interval: (Optional, def=0) Restart the target after n test cases, disable by setting to 0
@type crash_threshold: Integer
@kwarg crash_threshold: (Optional, def=3) Maximum number of crashes allowed before a node is exhausted
@type restart_sleep_time: Integer
@kwarg restart_sleep_time: (Optional, def=300) Time in seconds to sleep when target can't be restarted
@type web_port: Integer
@kwarg web_port: (Optional, def=26000) Port for monitoring fuzzing campaign via a web browser
'''
# run the parent classes initialization routine first.
pgraph.graph.__init__(self)
self.session_filename = session_filename
self.skip = skip
self.sleep_time = sleep_time
self.proto = proto.lower()
self.bind = bind
self.ssl = False
self.restart_interval = restart_interval
self.timeout = timeout
self.web_port = web_port
self.crash_threshold = crash_threshold
self.restart_sleep_time = restart_sleep_time
# Initialize logger
self.logger = logging.getLogger("Sulley_logger")
self.logger.setLevel(log_level)
formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] -> %(message)s')
if logfile != None:
filehandler = logging.FileHandler(logfile)
filehandler.setLevel(logfile_level)
filehandler.setFormatter(formatter)
self.logger.addHandler(filehandler)
consolehandler = logging.StreamHandler()
consolehandler.setFormatter(formatter)
consolehandler.setLevel(log_level)
self.logger.addHandler(consolehandler)
self.total_num_mutations = 0
self.total_mutant_index = 0
self.fuzz_node = None
self.targets = []
self.netmon_results = {}
self.procmon_results = {}
self.protmon_results = {}
self.pause_flag = False
self.crashing_primitives = {}
if self.proto == "tcp":
self.proto = socket.SOCK_STREAM
elif self.proto == "ssl":
self.proto = socket.SOCK_STREAM
self.ssl = True
elif self.proto == "udp":
self.proto = socket.SOCK_DGRAM
else:
raise sex.SullyRuntimeError("INVALID PROTOCOL SPECIFIED: %s" % self.proto)
# import settings if they exist.
self.import_file()
# create a root node. we do this because we need to start fuzzing from a single point and the user may want
# to specify a number of initial requests.
self.root = pgraph.node()
self.root.name = "__ROOT_NODE__"
self.root.label = self.root.name
self.last_recv = None
self.add_node(self.root)
####################################################################################################################
def add_node (self, node):
'''
Add a pgraph node to the graph. We overload this routine to automatically generate and assign an ID whenever a
node is added.
@type node: pGRAPH Node
@param node: Node to add to session graph
'''
node.number = len(self.nodes)
node.id = len(self.nodes)
if not self.nodes.has_key(node.id):
self.nodes[node.id] = node
return self
####################################################################################################################
def add_target (self, target):
'''
Add a target to the session. Multiple targets can be added for parallel fuzzing.
@type target: session.target
@param target: Target to add to session
'''
# pass specified target parameters to the PED-RPC server.
target.pedrpc_connect()
# add target to internal list.
self.targets.append(target)
####################################################################################################################
def connect (self, src, dst=None, callback=None):
'''
Create a connection between the two requests (nodes) and register an optional callback to process in between
transmissions of the source and destination request. Leverage this functionality to handle situations such as
challenge response systems. The session class maintains a top level node that all initial requests must be
connected to. Example::
sess = sessions.session()
sess.connect(sess.root, s_get("HTTP"))
If given only a single parameter, sess.connect() will default to attaching the supplied node to the root node.
This is a convenient alias and is identical to the second line from the above example::
sess.connect(s_get("HTTP"))
If you register callback method, it must follow this prototype::
def callback(session, node, edge, sock)
Where node is the node about to be sent, edge is the last edge along the current fuzz path to "node", session
is a pointer to the session instance which is useful for snagging data such as session.last_recv which contains
the data returned from the last socket transmission and sock is the live socket. A callback is also useful in
situations where, for example, the size of the next packet is specified in the first packet. As another
example, if you need to fill in the dynamic IP address of the target register a callback that snags the IP
from sock.getpeername()[0].
@type src: String or Request (Node)
@param src: Source request name or request node
@type dst: String or Request (Node)
@param dst: Destination request name or request node
@type callback: Function
@param callback: (Optional, def=None) Callback function to pass received data to between node xmits
@rtype: pgraph.edge
@return: The edge between the src and dst.
'''
# if only a source was provided, then make it the destination and set the source to the root node.
if not dst:
dst = src
src = self.root
# if source or destination is a name, resolve the actual node.
if type(src) is str:
src = self.find_node("name", src)
if type(dst) is str:
dst = self.find_node("name", dst)
# if source or destination is not in the graph, add it.
if src != self.root and not self.find_node("name", src.name):
self.add_node(src)
if not self.find_node("name", dst.name):
self.add_node(dst)
# create an edge between the two nodes and add it to the graph.
edge = connection(src.id, dst.id, callback)
self.add_edge(edge)
return edge
####################################################################################################################
def export_file (self):
'''
Dump various object values to disk.
@see: import_file()
'''
if not self.session_filename:
return
data = {}
data["session_filename"] = self.session_filename
data["skip"] = self.total_mutant_index
data["sleep_time"] = self.sleep_time
data["restart_sleep_time"] = self.restart_sleep_time
data["proto"] = self.proto
data["restart_interval"] = self.restart_interval
data["timeout"] = self.timeout
data["web_port"] = self.web_port
data["crash_threshold"] = self.crash_threshold
data["total_num_mutations"] = self.total_num_mutations
data["total_mutant_index"] = self.total_mutant_index
data["netmon_results"] = self.netmon_results
data["procmon_results"] = self.procmon_results
data['protmon_results'] = self.protmon_results
data["pause_flag"] = self.pause_flag
fh = open(self.session_filename, "wb+")
fh.write(zlib.compress(cPickle.dumps(data, protocol=2)))
fh.close()
####################################################################################################################
def fuzz (self, this_node=None, path=[]):
'''
Call this routine to get the ball rolling. No arguments are necessary as they are both utilized internally
during the recursive traversal of the session graph.
@type this_node: request (node)
@param this_node: (Optional, def=None) Current node that is being fuzzed.
@type path: List
@param path: (Optional, def=[]) Nodes along the path to the current one being fuzzed.
'''
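# A minimal driving script usually looks like this (illustrative sketch only;
# the request name and target address are hypothetical):
#   sess = session(session_filename="audit.session")
#   sess.add_target(target("10.0.0.1", 80))
#   sess.connect(s_get("HTTP"))
#   sess.fuzz()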
# if no node is specified, then we start from the root node and initialize the session.
if not this_node:
# we can't fuzz if we don't have at least one target and one request.
if not self.targets:
raise sex.SullyRuntimeError("NO TARGETS SPECIFIED IN SESSION")
if not self.edges_from(self.root.id):
raise sex.SullyRuntimeError("NO REQUESTS SPECIFIED IN SESSION")
this_node = self.root
try: self.server_init()
except: return
# TODO: complete parallel fuzzing, will likely have to thread out each target
target = self.targets[0]
# step through every edge from the current node.
for edge in self.edges_from(this_node.id):
# the destination node is the one actually being fuzzed.
self.fuzz_node = self.nodes[edge.dst]
num_mutations = self.fuzz_node.num_mutations()
# keep track of the path as we fuzz through it, don't count the root node.
# we keep track of edges as opposed to nodes because if there is more than one path through a set of
# given nodes we don't want any ambiguity.
path.append(edge)
current_path = " -> ".join([self.nodes[e.src].name for e in path[1:]])
current_path += " -> %s" % self.fuzz_node.name
self.logger.info("current fuzz path: %s" % current_path)
self.logger.info("fuzzed %d of %d total cases" % (self.total_mutant_index, self.total_num_mutations))
done_with_fuzz_node = False
crash_count = 0
# loop through all possible mutations of the fuzz node.
while not done_with_fuzz_node:
# if we need to pause, do so.
self.pause()
# if we have exhausted the mutations of the fuzz node, break out of the while(1).
# note: when mutate() returns False, the node has been reverted to the default (valid) state.
if not self.fuzz_node.mutate():
self.logger.error("all possible mutations for current fuzz node exhausted")
done_with_fuzz_node = True
continue
# make a record in the session that a mutation was made.
self.total_mutant_index += 1
# if we've hit the restart interval, restart the target.
if self.restart_interval and self.total_mutant_index % self.restart_interval == 0:
self.logger.error("restart interval of %d reached" % self.restart_interval)
self.restart_target(target)
# exception error handling routine, print log message and restart target.
def error_handler (e, msg, target, sock=None):
if sock:
sock.close()
msg += "\nException caught: %s" % repr(e)
msg += "\nRestarting target and trying again"
self.logger.critical(msg)
self.restart_target(target)
# if we don't need to skip the current test case.
if self.total_mutant_index > self.skip:
self.logger.info("fuzzing %d of %d" % (self.fuzz_node.mutant_index, num_mutations))
# attempt to complete a fuzz transmission. keep trying until we are successful, whenever a failure
# occurs, restart the target.
while 1:
# instruct the debugger/sniffer that we are about to send a new fuzz.
if target.procmon:
try:
target.procmon.pre_send(self.total_mutant_index)
except Exception, e:
error_handler(e, "failed on procmon.pre_send()", target)
continue
if target.netmon:
try:
target.netmon.pre_send(self.total_mutant_index)
except Exception, e:
error_handler(e, "failed on netmon.pre_send()", target)
continue
try:
# establish a connection to the target.
(family, socktype, proto, canonname, sockaddr)=socket.getaddrinfo(target.host, target.port)[0]
sock = socket.socket(family, self.proto)
except Exception, e:
error_handler(e, "failed creating socket", target)
continue
if self.bind:
try:
sock.bind(self.bind)
except Exception, e:
error_handler(e, "failed binding on socket", target, sock)
continue
try:
sock.settimeout(self.timeout)
# Connect is needed only for TCP stream
if self.proto == socket.SOCK_STREAM:
sock.connect((target.host, target.port))
except Exception, e:
error_handler(e, "failed connecting on socket", target, sock)
continue
# if SSL is requested, then enable it.
if self.ssl:
try:
ssl = socket.ssl(sock)
sock = httplib.FakeSocket(sock, ssl)
except Exception, e:
error_handler(e, "failed ssl setup", target, sock)
continue
# if the user registered a pre-send function, pass it the sock and let it do the deed.
try:
self.pre_send(sock)
except Exception, e:
error_handler(e, "pre_send() failed", target, sock)
continue
# send out valid requests for each node in the current path up to the node we are fuzzing.
try:
for e in path[:-1]:
node = self.nodes[e.dst]
self.transmit(sock, node, e, target)
except Exception, e:
error_handler(e, "failed transmitting a node up the path", target, sock)
continue
# now send the current node we are fuzzing.
try:
self.transmit(sock, self.fuzz_node, edge, target)
except Exception, e:
error_handler(e, "failed transmitting fuzz node", target, sock)
continue
# if we reach this point the send was successful, so break out of the while(1).
break
# if the user registered a post-send function, pass it the sock and let it do the deed.
# we do this outside the try/except loop because if our fuzz causes a crash then the post_send()
# will likely fail and we don't want to sit in an endless loop.
try:
self.post_send(sock)
except Exception, e:
error_handler(e, "post_send() failed", target, sock)
# done with the socket.
sock.close()
# delay in between test cases.
self.logger.info("sleeping for %f seconds" % self.sleep_time)
time.sleep(self.sleep_time)
# poll the PED-RPC endpoints (netmon, procmon etc...) for the target.
self.poll_pedrpc(target)
# serialize the current session state to disk.
self.export_file()
# recursively fuzz the remainder of the nodes in the session graph.
self.fuzz(self.fuzz_node, path)
# finished with the last node on the path, pop it off the path stack.
if path:
path.pop()
# loop to keep the main thread running and be able to receive signals
if self.signal_module:
# wait for a signal only if fuzzing is finished (this function is recursive)
# if fuzzing is not finished, web interface thread will catch it
if self.total_mutant_index == self.total_num_mutations:
import signal
try:
while True:
signal.pause()
except AttributeError:
# signal.pause() is missing for Windows; wait 1ms and loop instead
while True:
time.sleep(0.001)
####################################################################################################################
def import_file (self):
'''
Load various object values from disk.
@see: export_file()
'''
try:
fh = open(self.session_filename, "rb")
data = cPickle.loads(zlib.decompress(fh.read()))
fh.close()
except:
return
# update the skip variable to pick up fuzzing from last test case.
self.skip = data["total_mutant_index"]
self.session_filename = data["session_filename"]
self.sleep_time = data["sleep_time"]
self.restart_sleep_time = data["restart_sleep_time"]
self.proto = data["proto"]
self.restart_interval = data["restart_interval"]
self.timeout = data["timeout"]
self.web_port = data["web_port"]
self.crash_threshold = data["crash_threshold"]
self.total_num_mutations = data["total_num_mutations"]
self.total_mutant_index = data["total_mutant_index"]
self.netmon_results = data["netmon_results"]
self.procmon_results = data["procmon_results"]
self.protmon_results = data["protmon_results"]
self.pause_flag = data["pause_flag"]
####################################################################################################################
#def log (self, msg, level=1):
'''
If the supplied message falls under the current log level, print the specified message to screen.
@type msg: String
@param msg: Message to log
'''
#
#if self.log_level >= level:
#print "[%s] %s" % (time.strftime("%I:%M.%S"), msg)
####################################################################################################################
def num_mutations (self, this_node=None, path=[]):
'''
Number of total mutations in the graph. The logic of this routine is identical to that of fuzz(). See fuzz()
for inline comments. The member variable self.total_num_mutations is updated appropriately by this routine.
@type this_node: request (node)
@param this_node: (Optional, def=None) Current node that is being fuzzed.
@type path: List
@param path: (Optional, def=[]) Nodes along the path to the current one being fuzzed.
@rtype: Integer
@return: Total number of mutations in this session.
'''
if not this_node:
this_node = self.root
self.total_num_mutations = 0
for edge in self.edges_from(this_node.id):
next_node = self.nodes[edge.dst]
self.total_num_mutations += next_node.num_mutations()
if edge.src != self.root.id:
path.append(edge)
self.num_mutations(next_node, path)
# finished with the last node on the path, pop it off the path stack.
if path:
path.pop()
return self.total_num_mutations
####################################################################################################################
def pause (self):
'''
If the pause flag is raised, enter an endless loop until it is lowered.
'''
while 1:
if self.pause_flag:
time.sleep(1)
else:
break
####################################################################################################################
def poll_pedrpc (self, target):
'''
Poll the PED-RPC endpoints (netmon, procmon etc...) for the target.
@type target: session.target
@param target: Session target whose PED-RPC services we are polling
'''
# kill the pcap thread and see how many bytes the sniffer recorded.
if target.netmon:
bytes = target.netmon.post_send()
self.logger.info("netmon captured %d bytes for test case #%d" % (bytes, self.total_mutant_index))
self.netmon_results[self.total_mutant_index] = bytes
# check if our fuzz crashed the target. procmon.post_send() returns False if the target access violated.
if target.procmon and not target.procmon.post_send():
self.logger.info("procmon detected access violation on test case #%d" % self.total_mutant_index)
# retrieve the primitive that caused the crash and increment its individual crash count.
self.crashing_primitives[self.fuzz_node.mutant] = self.crashing_primitives.get(self.fuzz_node.mutant, 0) + 1
# notify with as much information as possible.
if self.fuzz_node.mutant.name:
msg = "primitive name: %s, " % self.fuzz_node.mutant.name
else:
msg = "primitive lacks a name, "
msg += "type: %s, default value: %s" % (self.fuzz_node.mutant.s_type, self.fuzz_node.mutant.original_value)
self.logger.info(msg)
# print crash synopsis
self.procmon_results[self.total_mutant_index] = target.procmon.get_crash_synopsis()
self.logger.info(self.procmon_results[self.total_mutant_index].split("\n")[0])
# if the user-supplied crash threshold is reached, exhaust this node.
if self.crashing_primitives[self.fuzz_node.mutant] >= self.crash_threshold:
# as long as we're not a group and not a repeat.
if not isinstance(self.fuzz_node.mutant, primitives.group):
if not isinstance(self.fuzz_node.mutant, blocks.repeat):
skipped = self.fuzz_node.mutant.exhaust()
self.logger.warning("crash threshold reached for this primitive, exhausting %d mutants." % skipped)
self.total_mutant_index += skipped
self.fuzz_node.mutant_index += skipped
# start the target back up.
# If it returns False, stop the test
if self.restart_target(target, stop_first=False) == False:
self.logger.critical("Restarting the target failed, exiting.")
self.export_file()
try:
self.thread.join()
except:
self.logger.debug("No server launched")
sys.exit(0)
####################################################################################################################
def post_send (self, sock):
'''
Overload or replace this routine to specify actions to run after each fuzz request. The order of events is
as follows::
pre_send() - req - callback ... req - callback - post_send()
When fuzzing RPC for example, register this method to tear down the RPC request.
@see: pre_send()
@type sock: Socket
@param sock: Connected socket to target
'''
# default to doing nothing.
pass
####################################################################################################################
def pre_send (self, sock):
'''
Overload or replace this routine to specify actions to run prior to each fuzz request. The order of events is
as follows::
pre_send() - req - callback ... req - callback - post_send()
When fuzzing RPC for example, register this method to establish the RPC bind.
@see: post_send()
@type sock: Socket
@param sock: Connected socket to target
'''
# default to doing nothing.
pass
####################################################################################################################
def restart_target (self, target, stop_first=True):
'''
Restart the fuzz target. If a VMControl is available revert the snapshot, if a process monitor is available
restart the target process. Otherwise, do nothing.
@type target: session.target
@param target: Target we are restarting
'''
# vm restarting is the preferred method so try that first.
if target.vmcontrol:
self.logger.warning("restarting target virtual machine")
target.vmcontrol.restart_target()
# if we have a connected process monitor, restart the target process.
elif target.procmon:
self.logger.warning("restarting target process")
if stop_first:
target.procmon.stop_target()
if not target.procmon.start_target():
return False
# give the process a few seconds to settle in.
time.sleep(3)
# otherwise all we can do is wait a while for the target to recover on its own.
else:
self.logger.error("no vmcontrol or procmon channel available ... sleeping for %d seconds" % self.restart_sleep_time)
time.sleep(self.restart_sleep_time)
# TODO: should be good to relaunch test for crash before returning False
return False
# pass specified target parameters to the PED-RPC server to re-establish connections.
target.pedrpc_connect()
####################################################################################################################
def server_init (self):
'''
Called by fuzz() on first run (not on recursive re-entry) to initialize variables, web interface, etc...
'''
self.total_mutant_index = 0
self.total_num_mutations = self.num_mutations()
# web interface thread doesn't catch KeyboardInterrupt
# add a signal handler, and exit on SIGINT
# TODO: should wait for the end of the ongoing test case, and stop gracefully netmon and procmon
# TODO: doesn't work on OS where the signal module isn't available
try:
import signal
self.signal_module = True
except:
self.signal_module = False
if self.signal_module:
def exit_abruptly(signal, frame):
'''Save current settings (just in case) and exit'''
self.export_file()
self.logger.critical("SIGINT received ... exiting")
try:
self.thread.join()
except:
self.logger.debug("No server launched")
sys.exit(0)
signal.signal(signal.SIGINT, exit_abruptly)
# spawn the web interface.
self.thread = web_interface_thread(self)
self.thread.start()
####################################################################################################################
def transmit (self, sock, node, edge, target):
'''
Render and transmit a node, process callbacks accordingly.
@type sock: Socket
@param sock: Socket to transmit node on
@type node: Request (Node)
@param node: Request/Node to transmit
@type edge: Connection (pgraph.edge)
@param edge: Edge along the current fuzz path from "node" to next node.
@type target: session.target
@param target: Target we are transmitting to
'''
data = None
# if the edge has a callback, process it. the callback has the option to render the node, modify it and return.
if edge.callback:
data = edge.callback(self, node, edge, sock)
self.logger.info("xmitting: [%d.%d]" % (node.id, self.total_mutant_index))
# if no data was returned by the callback, render the node here.
if not data:
data = node.render()
# if data length is > 65507 and proto is UDP, truncate it.
# TODO: this logic does not prevent duplicate test cases, need to address this in the future.
if self.proto == socket.SOCK_DGRAM:
# max UDP packet size.
# TODO: anyone know how to determine this value smarter?
# - See http://stackoverflow.com/questions/25841/maximum-buffer-length-for-sendto to fix this
MAX_UDP = 65507
if os.name != "nt" and os.uname()[0] == "Darwin":
MAX_UDP = 9216
if len(data) > MAX_UDP:
self.logger.debug("Too much data for UDP, truncating to %d bytes" % MAX_UDP)
data = data[:MAX_UDP]
try:
if self.proto == socket.SOCK_STREAM:
sock.send(data)
else:
sock.sendto(data, (self.targets[0].host, self.targets[0].port))
self.logger.debug("Packet sent : " + repr(data))
except Exception, inst:
self.logger.error("Socket error, send: %s" % inst)
if self.proto in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
# TODO: might have a need to increase this at some point. (possibly make it a class parameter)
try:
self.last_recv = sock.recv(10000)
except Exception, e:
self.last_recv = ""
else:
self.last_recv = ""
if len(self.last_recv) > 0:
self.logger.debug("received: [%d] %s" % (len(self.last_recv), repr(self.last_recv)))
else:
self.logger.warning("Nothing received on socket.")
# Increment individual crash count
self.crashing_primitives[self.fuzz_node.mutant] = self.crashing_primitives.get(self.fuzz_node.mutant,0) +1
# Note crash information
self.protmon_results[self.total_mutant_index] = data
#print self.protmon_results
########################################################################################################################
class web_interface_handler (BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
self.session = None
def commify (self, number):
number = str(number)
processing = 1
regex = re.compile(r"^(-?\d+)(\d{3})")
while processing:
(number, processing) = regex.subn(r"\1,\2",number)
return number
def do_GET (self):
self.do_everything()
def do_HEAD (self):
self.do_everything()
def do_POST (self):
self.do_everything()
def do_everything (self):
if "pause" in self.path:
self.session.pause_flag = True
if "resume" in self.path:
self.session.pause_flag = False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if "view_crash" in self.path:
response = self.view_crash(self.path)
elif "view_pcap" in self.path:
response = self.view_pcap(self.path)
else:
response = self.view_index()
self.wfile.write(response)
def log_error (self, *args, **kwargs):
pass
def log_message (self, *args, **kwargs):
pass
def version_string (self):
return "Sulley Fuzz Session"
def view_crash (self, path):
test_number = int(path.split("/")[-1])
return "<html><pre>%s</pre></html>" % self.session.procmon_results[test_number]
def view_pcap (self, path):
return path
def view_index (self):
response = """
<html>
<head>
<meta http-equiv="refresh" content="5">
<title>Sulley Fuzz Control</title>
<style>
a:link {color: #FF8200; text-decoration: none;}
a:visited {color: #FF8200; text-decoration: none;}
a:hover {color: #C5C5C5; text-decoration: none;}
body
{
background-color: #000000;
font-family: Arial, Helvetica, sans-serif;
font-size: 12px;
color: #FFFFFF;
}
td
{
font-family: Arial, Helvetica, sans-serif;
font-size: 12px;
color: #A0B0B0;
}
.fixed
{
font-family: Courier New;
font-size: 12px;
color: #A0B0B0;
}
.input
{
font-family: Arial, Helvetica, sans-serif;
font-size: 11px;
color: #FFFFFF;
background-color: #333333;
border: thin none;
height: 20px;
}
</style>
</head>
<body>
<center>
<table border=0 cellpadding=5 cellspacing=0 width=750><tr><td>
<!-- begin bounding table -->
<table border=0 cellpadding=5 cellspacing=0 width="100%%">
<tr bgcolor="#333333">
<td><div style="font-size: 20px;">Sulley Fuzz Control</div></td>
<td align=right><div style="font-weight: bold; font-size: 20px;">%(status)s</div></td>
</tr>
<tr bgcolor="#111111">
<td colspan=2 align="center">
<table border=0 cellpadding=0 cellspacing=5>
<tr bgcolor="#111111">
<td><b>Total:</b></td>
<td>%(total_mutant_index)s</td>
<td>of</td>
<td>%(total_num_mutations)s</td>
<td class="fixed">%(progress_total_bar)s</td>
<td>%(progress_total)s</td>
</tr>
<tr bgcolor="#111111">
<td><b>%(current_name)s:</b></td>
<td>%(current_mutant_index)s</td>
<td>of</td>
<td>%(current_num_mutations)s</td>
<td class="fixed">%(progress_current_bar)s</td>
<td>%(progress_current)s</td>
</tr>
</table>
</td>
</tr>
<tr>
<td>
<form method=get action="/pause">
<input class="input" type="submit" value="Pause">
</form>
</td>
<td align=right>
<form method=get action="/resume">
<input class="input" type="submit" value="Resume">
</form>
</td>
</tr>
</table>
<!-- begin procmon results -->
<table border=0 cellpadding=5 cellspacing=0 width="100%%">
<tr bgcolor="#333333">
<td nowrap>Test Case #</td>
<td>Crash Synopsis</td>
<td nowrap>Captured Bytes</td>
</tr>
"""
keys = self.session.procmon_results.keys()
keys.sort()
for key in keys:
val = self.session.procmon_results[key]
bytes = " "
if self.session.netmon_results.has_key(key):
bytes = self.commify(self.session.netmon_results[key])
response += '<tr><td class="fixed"><a href="/view_crash/%d">%06d</a></td><td>%s</td><td align=right>%s</td></tr>' % (key, key, val.split("\n")[0], bytes)
response += """
<!-- end procmon results -->
</table>
<!-- end bounding table -->
</td></tr></table>
</center>
</body>
</html>
"""
# what is the fuzzing status.
if self.session.pause_flag:
status = "<font color=red>PAUSED</font>"
else:
status = "<font color=green>RUNNING</font>"
# if there is a current fuzz node.
if self.session.fuzz_node:
# which node (request) are we currently fuzzing.
if self.session.fuzz_node.name:
current_name = self.session.fuzz_node.name
else:
current_name = "[N/A]"
# render sweet progress bars.
progress_current = float(self.session.fuzz_node.mutant_index) / float(self.session.fuzz_node.num_mutations())
num_bars = int(progress_current * 50)
progress_current_bar = "[" + "=" * num_bars + " " * (50 - num_bars) + "]"
progress_current = "%.3f%%" % (progress_current * 100)
progress_total = float(self.session.total_mutant_index) / float(self.session.total_num_mutations)
num_bars = int(progress_total * 50)
progress_total_bar = "[" + "=" * num_bars + " " * (50 - num_bars) + "]"
progress_total = "%.3f%%" % (progress_total * 100)
response %= \
{
"current_mutant_index" : self.commify(self.session.fuzz_node.mutant_index),
"current_name" : current_name,
"current_num_mutations" : self.commify(self.session.fuzz_node.num_mutations()),
"progress_current" : progress_current,
"progress_current_bar" : progress_current_bar,
"progress_total" : progress_total,
"progress_total_bar" : progress_total_bar,
"status" : status,
"total_mutant_index" : self.commify(self.session.total_mutant_index),
"total_num_mutations" : self.commify(self.session.total_num_mutations),
}
else:
response %= \
{
"current_mutant_index" : "",
"current_name" : "",
"current_num_mutations" : "",
"progress_current" : "",
"progress_current_bar" : "",
"progress_total" : "",
"progress_total_bar" : "",
"status" : "<font color=yellow>UNAVAILABLE</font>",
"total_mutant_index" : "",
"total_num_mutations" : "",
}
return response
########################################################################################################################
class web_interface_server (BaseHTTPServer.HTTPServer):
'''
http://docs.python.org/lib/module-BaseHTTPServer.html
'''
def __init__(self, server_address, RequestHandlerClass, session):
BaseHTTPServer.HTTPServer.__init__(self, server_address, RequestHandlerClass)
self.RequestHandlerClass.session = session
########################################################################################################################
class web_interface_thread (threading.Thread):
def __init__ (self, session):
threading.Thread.__init__(self, name="SulleyWebServer")
self._stopevent = threading.Event()
self.session = session
self.server = None
def run (self):
self.server = web_interface_server(('', self.session.web_port), web_interface_handler, self.session)
while not self._stopevent.isSet():
self.server.handle_request()
def join(self, timeout=None):
# A little dirty but no other solution afaik
self._stopevent.set()
conn = httplib.HTTPConnection("localhost:%d" % self.session.web_port)
conn.request("GET", "/")
conn.getresponse()
| gpl-2.0 |
glorizen/nupic | nupic/regions/PictureSensorExplorers/inward.py | 17 | 3835 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file defines InwardPictureExplorer, an explorer for
PictureSensor.
"""
from nupic.regions.PictureSensor import PictureSensor
from nupic.math.cross import cross
class InwardPictureExplorer(PictureSensor.PictureExplorer):
"""
A PictureSensor explorer that deterministically generates
inward sweeps starting at each point along a square perimeter
of side length 1+2*radialLength, with each such sweep
taking the most expeditious route to the center position.
"""
# Pre-computed table
_inwardNeighbors = tuple([delta for delta in cross([-1,0,1],[-1,0,1])])
@classmethod
def queryRelevantParams(klass):
"""
Returns a sequence of parameter names that are relevant to
the operation of the explorer.
May be extended or overridden by sub-classes as appropriate.
"""
return ( 'radialLength',)
def initSequence(self, state, params):
"""
Initiate the next sequence at the appropriate point
along the block perimeter.
"""
radialLength = params['radialLength']
# Take into account repetitions
iterCount = self._getIterCount() // self._getNumRepetitions()
# numRadials are all the positions on the boundary
# Calculation 1) (2*radialLength+1)**2 - (2*radialLength-1)**2
# Calculation 2) (2*radialLength+1)*4 - 4
# numRadials, both ways is 8*radialLength
# And each radial is radialLength + 1 iterations
numRadialsPerCat = 8 * radialLength
numItersPerCat = numRadialsPerCat * (radialLength + 1)
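# e.g. with radialLength=2: 16 radials per category, each radialLength+1=3
# iterations long, for 48 iterations per category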
numCats = self._getNumCategories()
catIndex = iterCount // numItersPerCat
catIterCount = iterCount % numItersPerCat
radialIndex = catIterCount // (radialLength + 1)
# Determine quadrants: 0 (top), 1 (right), 2 (bottom), 3 (left)
quadrantIndex = radialIndex // (2 * radialLength)
radialPosn = catIterCount % (radialLength + 1)
quadrantPosn = radialIndex % (2 * radialLength)
# Determine start position of this radial
posnX, posnY = {
0: (quadrantPosn - radialLength, -radialLength),
1: (radialLength, quadrantPosn - radialLength),
2: (radialLength - quadrantPosn, radialLength),
3: (-radialLength, radialLength - quadrantPosn),
}[quadrantIndex]
# Override default state
state['posnX'] = posnX
state['posnY'] = posnY
state['velocityX'] = 0
state['velocityY'] = 0
state['angularPosn'] = 0
state['angularVelocity'] = 0
state['catIndex'] = catIndex
def updateSequence(self, state, params):
"""
Move to the neighbor position closest to the center
"""
posnX = state['posnX']
posnY = state['posnY']
# Compute neighbor with minimal euclidean distance to center
neighbors = [(posnX + dx, posnY + dy) for dx, dy in self._inwardNeighbors]
state['posnX'], state['posnY'] = min(neighbors, key=lambda a: a[0]**2+a[1]**2)
| agpl-3.0 |
pdp10/sbpipe | tests/test_snake_copasi_pe.py | 2 | 3261 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Piero Dalle Pezze
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import unittest
import subprocess
from tests.context import sbpipe
from sbpipe.utils.dependencies import is_py_package_installed
class TestPeSnake(unittest.TestCase):
_orig_wd = os.getcwd()
_ir_folder = 'snakemake'
_output = 'OK'
@classmethod
def setUpClass(cls):
os.chdir(cls._ir_folder)
try:
subprocess.Popen(['CopasiSE'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()[0]
except OSError as e:
cls._output = 'CopasiSE not found: SKIP ... '
return
if not is_py_package_installed('sbpipe'):
cls._output = 'sbpipe not installed: SKIP ... '
if not is_py_package_installed('snakemake'):
cls._output = 'snakemake not installed: SKIP ... '
if not os.path.exists('sbpipe_pe.snake'):
cls._output = 'snakemake workflow not found: SKIP ... '
@classmethod
def tearDownClass(cls):
os.chdir(cls._orig_wd)
def setUp(self):
pass
def tearDown(self):
pass
def test_pe_snake1(self):
if self._output == 'OK':
from snakemake import snakemake
self.assertTrue(
snakemake(snakefile='sbpipe_pe.snake',
configfile='ir_model_param_estim.yaml',
cores=7,
forceall=True,
quiet=True))
else:
sys.stdout.write(self._output)
sys.stdout.flush()
def test_pe_snake2(self):
if self._output == 'OK':
from snakemake import snakemake
self.assertTrue(
snakemake(snakefile='sbpipe_pe.snake',
configfile='ir_model_non_identif_param_estim.yaml',
cores=7,
forceall=True,
quiet=True))
else:
sys.stdout.write(self._output)
sys.stdout.flush()
if __name__ == '__main__':
unittest.main(verbosity=2)
| mit |
dsgouda/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/Lro/fixtures/acceptancetestslro/models/resource.py | 32 | 1506 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Resource(Model):
"""Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar type: Resource Type
:vartype type: str
:param tags:
:type tags: dict
:param location: Resource Location
:type location: str
:ivar name: Resource Name
:vartype name: str
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, tags=None, location=None):
self.id = None
self.type = None
self.tags = tags
self.location = location
self.name = None
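# Illustrative construction (sketch; the values are hypothetical):
#   resource = Resource(tags={"env": "test"}, location="westus")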
| mit |
bva24/smart | cartridge/shop/managers.py | 5 | 7802 | from __future__ import unicode_literals
from future.builtins import str
from future.builtins import zip
from collections import defaultdict
from datetime import datetime, timedelta
from django.db.models import Manager, Q
from django.utils.datastructures import SortedDict
from django.utils.timezone import now
from mezzanine.conf import settings
from mezzanine.core.managers import CurrentSiteManager
class CartManager(Manager):
def from_request(self, request):
"""
Return a cart by ID stored in the session, creating it if not
found as well as removing old carts prior to creating a new
cart.
"""
cart_id = request.session.get("cart", None)
cart = None
if cart_id:
try:
cart = self.current().get(id=cart_id)
except self.model.DoesNotExist:
request.session["cart"] = None
else:
# Update timestamp and clear out old carts.
cart.last_updated = now()
cart.save()
self.expired().delete()
if not cart:
# Forget what checkout step we were up to.
try:
del request.session["order"]["step"]
request.session.modified = True
except KeyError:
pass
from cartridge.shop.utils import EmptyCart
cart = EmptyCart(request)
return cart
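# Illustrative usage (sketch): typically invoked from a view or context
# processor as Cart.objects.from_request(request).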
def expiry_time(self):
"""
Datetime for expired carts.
"""
return now() - timedelta(minutes=settings.SHOP_CART_EXPIRY_MINUTES)
def current(self):
"""
Unexpired carts.
"""
return self.filter(last_updated__gte=self.expiry_time())
def expired(self):
"""
Expired carts.
"""
return self.filter(last_updated__lt=self.expiry_time())
class OrderManager(CurrentSiteManager):
def from_request(self, request):
"""
Returns the last order made by session key. Used for
Google Anayltics order tracking in the order complete view,
and in tests.
"""
orders = self.filter(key=request.session.session_key).order_by("-id")
if orders:
return orders[0]
raise self.model.DoesNotExist
def get_for_user(self, order_id, request):
"""
Used for retrieving a single order, ensuring the user in
the given request object can access it.
"""
lookup = {"id": order_id}
if not request.user.is_authenticated():
lookup["key"] = request.session.session_key
elif not request.user.is_staff:
lookup["user_id"] = request.user.id
return self.get(**lookup)
class ProductOptionManager(Manager):
def as_fields(self):
"""
Return a dict of product options as their field names and
choices.
"""
options = defaultdict(list)
for option in self.all():
options["option%s" % option.type].append(option.name)
return options
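# e.g. a hypothetical result: {"option1": ["Small", "Medium"], "option2": ["Red"]}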
class ProductVariationManager(Manager):
use_for_related_fields = True
def _empty_options_lookup(self, exclude=None):
"""
Create a lookup dict of field__isnull for options fields.
"""
if not exclude:
exclude = {}
return dict([("%s__isnull" % f.name, True)
for f in self.model.option_fields() if f.name not in exclude])
def create_from_options(self, options):
"""
Create all unique variations from the selected options.
"""
if options:
options = SortedDict(options)
# Build all combinations of options.
variations = [[]]
for values_list in list(options.values()):
variations = [x + [y] for x in variations for y in values_list]
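# e.g. (hypothetical values) {"option1": ["S", "M"], "option2": ["Red"]}
# expands to [["S", "Red"], ["M", "Red"]]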
for variation in variations:
# Lookup unspecified options as null to ensure a
# unique filter.
variation = dict(list(zip(list(options.keys()), variation)))
lookup = dict(variation)
lookup.update(self._empty_options_lookup(exclude=variation))
try:
self.get(**lookup)
except self.model.DoesNotExist:
self.create(**variation)
def manage_empty(self):
"""
Create an empty variation (no options) if none exist,
otherwise if multiple variations exist ensure there is no
redundant empty variation. Also ensure there is at least one
default variation.
"""
total_variations = self.count()
if total_variations == 0:
self.create()
elif total_variations > 1:
self.filter(**self._empty_options_lookup()).delete()
try:
self.get(default=True)
except self.model.DoesNotExist:
first_variation = self.all()[0]
first_variation.default = True
first_variation.save()
def set_default_images(self, deleted_image_ids):
"""
Assign the first image for the product to each variation that
doesn't have an image. Also remove any images that have been
deleted via the admin to avoid invalid image selections.
"""
variations = self.all()
if not variations:
return
image = variations[0].product.images.exclude(id__in=deleted_image_ids)
if image:
image = image[0]
for variation in variations:
save = False
if str(variation.image_id) in deleted_image_ids:
variation.image = None
save = True
if image and not variation.image:
variation.image = image
save = True
if save:
variation.save()
class ProductActionManager(Manager):
use_for_related_fields = True
def _action_for_field(self, field):
"""
Increases the given field by datetime.today().toordinal()
which provides a time scaling value we can order by to
determine popularity over time.
"""
timestamp = datetime.today().toordinal()
action, created = self.get_or_create(timestamp=timestamp)
setattr(action, field, getattr(action, field) + 1)
action.save()
def added_to_cart(self):
"""
Increase total_cart when product is added to cart.
"""
self._action_for_field("total_cart")
def purchased(self):
"""
Increase total_purchased when product is purchased.
"""
self._action_for_field("total_purchase")
class DiscountCodeManager(Manager):
def active(self, *args, **kwargs):
"""
Items flagged as active and in valid date range if date(s) are
specified.
"""
n = now()
valid_from = Q(valid_from__isnull=True) | Q(valid_from__lte=n)
valid_to = Q(valid_to__isnull=True) | Q(valid_to__gte=n)
valid = self.filter(valid_from, valid_to, active=True)
return valid.exclude(uses_remaining=0)
def get_valid(self, code, cart):
"""
Items flagged as active and within date range as well checking
that the given cart contains items that the code is valid for.
"""
total_price_valid = (Q(min_purchase__isnull=True) |
Q(min_purchase__lte=cart.total_price()))
discount = self.active().get(total_price_valid, code=code)
products = discount.all_products()
if products.count() > 0:
if products.filter(variations__sku__in=cart.skus()).count() == 0:
raise self.model.DoesNotExist
return discount
| bsd-2-clause |
soarpenguin/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_project.py | 9 | 7686 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_project
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower projects
description:
- Create, update, or destroy Ansible Tower projects. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the project.
required: True
default: null
description:
description:
- Description to use for the project.
required: False
default: null
scm_type:
description:
- Type of scm resource.
required: False
default: "manual"
choices: ["manual", "git", "hg", "svn"]
scm_url:
description:
- URL of scm resource.
required: False
default: null
local_path:
description:
- The server playbook directory for manual projects.
required: False
default: null
scm_branch:
description:
- The branch to use for the scm resource.
required: False
default: null
scm_credential:
description:
- Name of the credential to use with this scm resource.
required: False
default: null
scm_clean:
description:
- Remove local modifications before updating.
required: False
default: False
scm_delete_on_update:
description:
- Remove the repository completely before updating.
required: False
default: False
scm_update_on_launch:
description:
- Before an update to the local repository before launching a job with this project.
required: False
default: False
organization:
description:
- Primary key of organization for project.
required: False
default: null
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.3"
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Add tower project
tower_project:
name: "Foo"
description: "Foo bar project"
organization: "test"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
description=dict(),
organization=dict(),
scm_type=dict(choices=['manual', 'git', 'hg', 'svn'], default='manual'),
scm_url=dict(),
scm_branch=dict(),
scm_credential=dict(),
scm_clean=dict(type='bool', default=False),
scm_delete_on_update=dict(type='bool', default=False),
scm_update_on_launch=dict(type='bool', default=False),
local_path=dict(),
tower_host=dict(),
tower_username=dict(),
tower_password=dict(no_log=True),
tower_verify_ssl=dict(type='bool', default=True),
tower_config_file=dict(type='path'),
state=dict(choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
name = module.params.get('name')
description = module.params.get('description')
organization = module.params.get('organization')
scm_type = module.params.get('scm_type')
if scm_type == "manual":
scm_type = ""
scm_url = module.params.get('scm_url')
local_path = module.params.get('local_path')
scm_branch = module.params.get('scm_branch')
scm_credential = module.params.get('scm_credential')
scm_clean = module.params.get('scm_clean')
scm_delete_on_update = module.params.get('scm_delete_on_update')
scm_update_on_launch = module.params.get('scm_update_on_launch')
state = module.params.get('state')
json_output = {'project': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
project = tower_cli.get_resource('project')
try:
if state == 'present':
try:
org_res = tower_cli.get_resource('organization')
org = org_res.get(name=organization)
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update project, organization not found: {0}'.format(organization), changed=False)
try:
cred_res = tower_cli.get_resource('credential')
cred = cred_res.get(name=scm_credential)
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update project, credential not found: {0}'.format(scm_credential), changed=False)
result = project.modify(name=name, description=description,
organization=org['id'],
scm_type=scm_type, scm_url=scm_url, local_path=local_path,
scm_branch=scm_branch, scm_clean=scm_clean, credential=cred['id'],
scm_delete_on_update=scm_delete_on_update,
scm_update_on_launch=scm_update_on_launch,
create_on_missing=True)
json_output['id'] = result['id']
elif state == 'absent':
result = project.delete(name=name)
except (exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(msg='Failed to update project: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
ctiller/grpc | tools/line_count/yaml2csv.py | 8 | 1503 | #!/usr/bin/env python
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
import datetime
import csv
argp = argparse.ArgumentParser(description='Convert cloc yaml to bigquery csv')
argp.add_argument('-i', '--input', type=str)
argp.add_argument('-d',
'--date',
type=str,
default=datetime.date.today().strftime('%Y-%m-%d'))
argp.add_argument('-o', '--output', type=str, default='out.csv')
args = argp.parse_args()
data = yaml.load(open(args.input).read())
with open(args.output, 'w') as outf:
writer = csv.DictWriter(
outf, ['date', 'name', 'language', 'code', 'comment', 'blank'])
for key, value in data.iteritems():
if key == 'header':
continue
if key == 'SUM':
continue
if key.startswith('third_party/'):
continue
row = {'name': key, 'date': args.date}
row.update(value)
writer.writerow(row)
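# Example invocation (illustrative; the file names are hypothetical):
#   python yaml2csv.py -i cloc_output.yaml -d 2017-06-01 -o line_counts.csv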
| apache-2.0 |
ZhaoCJ/django | django/db/migrations/graph.py | 5 | 6308 | from django.utils.datastructures import OrderedSet
from django.db.migrations.state import ProjectState
class MigrationGraph(object):
"""
Represents the digraph of all migrations in a project.
Each migration is a node, and each dependency is an edge. There are
no implicit dependencies between numbered migrations - the numbering is
merely a convention to aid file listing. Every new numbered migration
has a declared dependency to the previous number, meaning that VCS
branch merges can be detected and resolved.
Migrations files can be marked as replacing another set of migrations -
this is to support the "squash" feature. The graph handler isn't responsible
for these; instead, the code to load them in here should examine the
migration files and if the replaced migrations are all either unapplied
or not present, it should ignore the replaced ones, load in just the
replacing migration, and repoint any dependencies that pointed to the
replaced migrations to point to the replacing one.
A node should be a tuple: (app_path, migration_name). The tree special-cases
things within an app - namely, root nodes and leaf nodes ignore dependencies
to other apps.
"""
def __init__(self):
self.nodes = {}
self.dependencies = {}
self.dependents = {}
def add_node(self, node, implementation):
self.nodes[node] = implementation
def add_dependency(self, child, parent):
if child not in self.nodes:
raise KeyError("Dependency references nonexistent child node %r" % (child,))
if parent not in self.nodes:
raise KeyError("Dependency references nonexistent parent node %r" % (parent,))
self.dependencies.setdefault(child, set()).add(parent)
self.dependents.setdefault(parent, set()).add(child)
def forwards_plan(self, node):
"""
Given a node, returns a list of which previous nodes (dependencies)
must be applied, ending with the node itself.
This is the list you would follow if applying the migrations to
a database.
"""
if node not in self.nodes:
raise ValueError("Node %r not a valid node" % (node, ))
return self.dfs(node, lambda x: self.dependencies.get(x, set()))
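# Illustrative example (hypothetical app/migration names): if ("app", "0002")
# depends on ("app", "0001"), forwards_plan(("app", "0002")) returns
# [("app", "0001"), ("app", "0002")]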
def backwards_plan(self, node):
"""
        Given a node, returns a list of which dependent nodes (dependents)
must be unapplied, ending with the node itself.
This is the list you would follow if removing the migrations from
a database.
"""
if node not in self.nodes:
raise ValueError("Node %r not a valid node" % (node, ))
return self.dfs(node, lambda x: self.dependents.get(x, set()))
def root_nodes(self):
"""
Returns all root nodes - that is, nodes with no dependencies inside
their app. These are the starting point for an app.
"""
roots = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.dependencies.get(node, set())):
roots.add(node)
return roots
def leaf_nodes(self, app=None):
"""
Returns all leaf nodes - that is, nodes with no dependents in their app.
These are the "most current" version of an app's schema.
Having more than one per app is technically an error, but one that
gets handled further up, in the interactive command - it's usually the
result of a VCS merge and needs some user input.
"""
leaves = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.dependents.get(node, set())) and (not app or app == node[0]):
leaves.add(node)
return leaves
def dfs(self, start, get_children):
"""
Dynamic programming based depth first search, for finding dependencies.
"""
cache = {}
def _dfs(start, get_children, path):
# If we already computed this, use that (dynamic programming)
if (start, get_children) in cache:
return cache[(start, get_children)]
# If we've traversed here before, that's a circular dep
if start in path:
raise CircularDependencyError(path[path.index(start):] + [start])
# Build our own results list, starting with us
results = []
results.append(start)
# We need to add to results all the migrations this one depends on
children = sorted(get_children(start))
path.append(start)
for n in children:
results = _dfs(n, get_children, path) + results
path.pop()
# Use OrderedSet to ensure only one instance of each result
results = list(OrderedSet(results))
# Populate DP cache
cache[(start, get_children)] = results
# Done!
return results
return _dfs(start, get_children, [])
def __str__(self):
return "Graph: %s nodes, %s edges" % (len(self.nodes), sum(len(x) for x in self.dependencies.values()))
def project_state(self, nodes=None, at_end=True):
"""
Given a migration node or nodes, returns a complete ProjectState for it.
If at_end is False, returns the state before the migration has run.
If nodes is not provided, returns the overall most current project state.
"""
if nodes is None:
nodes = list(self.leaf_nodes())
if len(nodes) == 0:
return ProjectState()
if not isinstance(nodes[0], tuple):
nodes = [nodes]
plan = []
for node in nodes:
for migration in self.forwards_plan(node):
if migration not in plan:
if not at_end and migration in nodes:
continue
plan.append(migration)
project_state = ProjectState()
for node in plan:
project_state = self.nodes[node].mutate_state(project_state)
return project_state
class CircularDependencyError(Exception):
"""
Raised when there's an impossible-to-resolve circular dependency.
"""
pass
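# --- Illustrative usage sketch (editor's addition, not part of Django) ---
# A minimal example of the graph API above, guarded so it only runs when this
# module is executed directly; the app/migration names are made up and the
# fake migration class stands in for real Migration instances.
if __name__ == '__main__':
    class _FakeMigration(object):
        def mutate_state(self, state):
            return state
    graph = MigrationGraph()
    graph.add_node(("app", "0001_initial"), _FakeMigration())
    graph.add_node(("app", "0002_add_field"), _FakeMigration())
    graph.add_dependency(("app", "0002_add_field"), ("app", "0001_initial"))
    # Dependencies first, the requested node last:
    print(graph.forwards_plan(("app", "0002_add_field")))
    # -> [('app', '0001_initial'), ('app', '0002_add_field')]
    # Dependents are unapplied before the node itself when going backwards:
    print(graph.backwards_plan(("app", "0001_initial")))
    # -> [('app', '0002_add_field'), ('app', '0001_initial')]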
| bsd-3-clause |
crazyyoung01/vv | vn.trader/vtServer.py | 3 | 2942 | # encoding: utf-8
import sys
import os
from datetime import datetime
from time import sleep
from threading import Thread
import eventType
from vnrpc import RpcServer
from vtEngine import MainEngine
########################################################################
class VtServer(RpcServer):
"""vn.trader服务器"""
#----------------------------------------------------------------------
def __init__(self, repAddress, pubAddress):
"""Constructor"""
super(VtServer, self).__init__(repAddress, pubAddress)
self.usePickle()
        # Create the main engine object
self.engine = MainEngine()
        # Register the main engine's methods as RPC functions on the server
self.register(self.engine.connect)
self.register(self.engine.subscribe)
self.register(self.engine.sendOrder)
self.register(self.engine.cancelOrder)
self.register(self.engine.qryAccont)
self.register(self.engine.qryPosition)
self.register(self.engine.exit)
self.register(self.engine.writeLog)
self.register(self.engine.dbConnect)
self.register(self.engine.dbInsert)
self.register(self.engine.dbQuery)
self.register(self.engine.dbUpdate)
self.register(self.engine.getContract)
self.register(self.engine.getAllContracts)
self.register(self.engine.getOrder)
self.register(self.engine.getAllWorkingOrders)
self.register(self.engine.getAllGatewayNames)
        # Register a listener for events published by the event engine
self.engine.eventEngine.registerGeneralHandler(self.eventHandler)
#----------------------------------------------------------------------
def eventHandler(self, event):
"""事件处理"""
self.publish(event.type_, event)
#----------------------------------------------------------------------
def stopServer(self):
"""停止服务器"""
# 关闭引擎
self.engine.exit()
# 停止服务器线程
self.stop()
#----------------------------------------------------------------------
def printLog(content):
"""打印日志"""
print datetime.now().strftime("%H:%M:%S"), '\t', content
#----------------------------------------------------------------------
def runServer():
"""运行服务器"""
repAddress = 'tcp://*:2014'
pubAddress = 'tcp://*:0602'
    # Create and start the server
server = VtServer(repAddress, pubAddress)
server.start()
printLog('-'*50)
    printLog(u'vn.trader server started')
    # Enter the main loop
while True:
        printLog(u'Type exit to shut down the server')
if raw_input() != 'exit':
continue
        printLog(u'Shut down the server? yes|no')
if raw_input() == 'yes':
break
server.stopServer()
if __name__ == '__main__':
runServer() | mit |
t3dev/odoo | addons/website_twitter/models/res_config_settings.py | 13 | 4137 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import requests
from odoo import api, fields, models, _
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
TWITTER_EXCEPTION = {
304: _('There was no new data to return.'),
400: _('The request was invalid or cannot be otherwise served. Requests without authentication are considered invalid and will yield this response.'),
401: _('Authentication credentials were missing or incorrect. Maybe screen name tweets are protected.'),
403: _('The request is understood, but it has been refused or access is not allowed. Please check your Twitter API Key and Secret.'),
    429: _("Request cannot be served due to the application's rate limit having been exhausted for the resource."),
500: _('Twitter seems broken. Please retry later. You may consider posting an issue on Twitter forums to get help.'),
502: _('Twitter is down or being upgraded.'),
503: _('The Twitter servers are up, but overloaded with requests. Try again later.'),
504: _('The Twitter servers are up, but the request could not be serviced due to some failure within our stack. Try again later.')
}
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
twitter_api_key = fields.Char(
related='website_id.twitter_api_key', readonly=False,
string='API Key',
help='Twitter API key you can get it from https://apps.twitter.com/')
twitter_api_secret = fields.Char(
related='website_id.twitter_api_secret', readonly=False,
string='API secret',
help='Twitter API secret you can get it from https://apps.twitter.com/')
twitter_screen_name = fields.Char(
related='website_id.twitter_screen_name', readonly=False,
string='Favorites From',
        help='Screen Name of the Twitter Account from which you want to load favorites. '
             'It does not have to match the API Key/Secret.')
twitter_server_uri = fields.Char(string='Twitter server uri', readonly=True)
def _get_twitter_exception_message(self, error_code):
if error_code in TWITTER_EXCEPTION:
return TWITTER_EXCEPTION[error_code]
else:
return _('HTTP Error: Something is misconfigured')
def _check_twitter_authorization(self):
try:
self.website_id.fetch_favorite_tweets()
except requests.HTTPError as e:
_logger.info("%s - %s" % (e.response.status_code, e.response.reason), exc_info=True)
raise UserError("%s - %s" % (e.response.status_code, e.response.reason) + ':' + self._get_twitter_exception_message(e.response.status_code))
except IOError:
_logger.info(_('We failed to reach a twitter server.'), exc_info=True)
raise UserError(_('Internet connection refused') + ' ' + _('We failed to reach a twitter server.'))
except Exception:
_logger.info(_('Please double-check your Twitter API Key and Secret!'), exc_info=True)
raise UserError(_('Twitter authorization error!') + ' ' + _('Please double-check your Twitter API Key and Secret!'))
@api.model
def create(self, vals):
TwitterConfig = super(ResConfigSettings, self).create(vals)
if vals.get('twitter_api_key') or vals.get('twitter_api_secret') or vals.get('twitter_screen_name'):
TwitterConfig._check_twitter_authorization()
return TwitterConfig
@api.multi
def write(self, vals):
TwitterConfig = super(ResConfigSettings, self).write(vals)
if vals.get('twitter_api_key') or vals.get('twitter_api_secret') or vals.get('twitter_screen_name'):
self._check_twitter_authorization()
return TwitterConfig
@api.model
def get_values(self):
res = super(ResConfigSettings, self).get_values()
Params = self.env['ir.config_parameter'].sudo()
res.update({
'twitter_server_uri': '%s/' % Params.get_param('web.base.url', default='http://yourcompany.odoo.com'),
})
return res
| gpl-3.0 |
ph1l/ocemr | ocemr/urls.py | 1 | 10187 | ##########################################################################
#
# This file is part of OCEMR.
#
# OCEMR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OCEMR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OCEMR. If not, see <http://www.gnu.org/licenses/>.
#
#
#########################################################################
# Copyright 2011-8 Philip Freeman <[email protected]>
##########################################################################
from django.conf.urls import include, url
from django.conf.urls.static import static
from . import views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
from django.contrib.auth import views as auth_views
import settings
urlpatterns = [
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login/$', auth_views.LoginView.as_view(template_name='login.html')),
url(r'^accounts/logout/$', auth_views.LogoutView.as_view(template_name='logout.html')),
url(r'^user_prefs/$', views.util.user_prefs),
url(r'^$', views.util.index),
url(r'^user_prefs/change_password/$', views.util.change_password),
url(r'^get_backup/$', views.util.get_backup),
url(r'^restore_backup/$', views.util.restore_backup),
url(r'^village_merge_wizard/$', views.util.village_merge_wizard),
url(r'^village_merge_wizard/(?P<villageId>[0-9]+)/(?P<villageIncorrectId>[0-9]+)/$', views.util.village_merge_wizard_go),
url(r'^autospel_name/(?P<inapp>.+)/(?P<inmodel>.+)/$', views.util.autospel_name),
url(r'^autocomplete_name/(?P<inapp>.+)/(?P<inmodel>.+)/$', views.util.autocomplete_name),
url(r'^autosearch_title/(?P<inapp>.+)/(?P<inmodel>.+)/$', views.util.autosearch_title),
url(r'^blank/$',views.util.blank),
url(r'^close_window/$',views.util.close_window),
url(r'^patient_queue/$', views.patient.patient_queue),
url(r'^patient_queue/(?P<dayoffset>[-0-9]+)/$', views.patient.patient_queue),
url(r'^select_date_for_patient_queue/$', views.patient.select_date_for_patient_queue),
url(r'^patient/(?P<id>\d+)/$', views.patient.patient),
url(r'^patient/edit/age/(?P<id>\d+)/$', views.patient.patient_edit_age),
url(r'^patient/edit/village/(?P<id>\d+)/$', views.patient.patient_edit_village),
url(r'^patient/edit/name/(?P<id>\d+)/$', views.patient.patient_edit_name),
url(r'^patient/edit/note/(?P<id>\d+)/$', views.patient.patient_edit_note),
url(r'^patient/edit/gender/(?P<id>\d+)/$', views.patient.patient_edit_gender),
url(r'^patient/edit/phone/(?P<id>\d+)/$', views.patient.patient_edit_phone),
url(r'^patient/edit/email/(?P<id>\d+)/$', views.patient.patient_edit_email),
url(r'^patient/edit/alt_contact_name/(?P<id>\d+)/$', views.patient.patient_edit_alt_contact_name),
url(r'^patient/edit/alt_contact_phone/(?P<id>\d+)/$', views.patient.patient_edit_alt_contact_phone),
url(r'^patient/search/$', views.patient.patient_search),
url(r'^patient/new/$', views.patient.patient_new),
url(r'^patient/schedule_new_visit/(?P<id>\d+)/$', views.patient.schedule_new_visit),
url(r'^patient/schedule_walkin_visit/(?P<id>\d+)/$', views.patient.schedule_walkin_visit),
url(r'^patient/delete_visit/(?P<id>\d+)/$', views.patient.delete_visit),
url(r'^patient/edit_visit/(?P<id>\d+)/$', views.patient.edit_visit),
url(r'^patient/edit_visit_reason/(?P<id>\d+)/$', views.patient.edit_visit_reason),
url(r'^patient/edit_visit_seen/(?P<id>\d+)/$', views.patient.edit_visit_seen),
url(r'^patient/merge/(?P<id>\d+)/$', views.patient.patient_merge),
url(r'^patient/merge/(?P<id>\d+)/(?P<dupid>\d+)/$', views.patient.patient_do_merge),
url(r'^visit/(?P<id>\d+)/$', views.visit.visit),
url(r'^visit/(?P<id>\d+)/seen/$', views.visit.visit_seen),
url(r'^visit/(?P<id>\d+)/unseen/$', views.visit.visit_unseen),
url(r'^visit/(?P<id>\d+)/claim/$', views.visit.visit_claim),
url(r'^visit/(?P<id>\d+)/unclaim/$', views.visit.visit_unclaim),
url(r'^visit/(?P<id>\d+)/finish/$', views.visit.visit_finish),
url(r'^visit/(?P<id>\d+)/unfinish/$', views.visit.visit_unfinish),
url(r'^visit/(?P<id>\d+)/allergy/new/$', views.visit.visit_allergy_new),
url(r'^visit/(?P<id>\d+)/allergy/delete/(?P<oid>\d+)/$', views.visit.visit_allergy_delete),
url(r'^visit/(?P<id>\d+)/past/$', views.visit.visit_past),
url(r'^visit/(?P<id>\d+)/subj/$', views.visit.visit_subj),
url(r'^visit/(?P<id>\d+)/subj/new/(?P<symptomtypeid>\d+)/$', views.visit.visit_subj_new),
url(r'^visit/(?P<id>\d+)/subj/edit/(?P<visitsymptomid>\d+)/$', views.visit.visit_subj_edit),
url(r'^visit/(?P<id>\d+)/subj/delete/(?P<visitsymptomid>\d+)/$', views.visit.visit_subj_delete),
url(r'^visit/(?P<id>\d+)/obje/$', views.visit.visit_obje),
url(r'^visit/(?P<id>\d+)/obje/vitals/new/$', views.visit.visit_obje_vitals_new),
#url(r'^visit/(?P<id>\d+)/obje/vital/new/(?P<vitaltypeid>\d+)/$', views.visit.visit_obje_vital_new),
url(r'^visit/(?P<id>\d+)/obje/vital/delete/(?P<oid>\d+)/$', views.visit.visit_obje_vital_delete),
url(r'^visit/(?P<id>\d+)/obje/examNote/new/(?P<examnotetypeid>\d+)/$', views.visit.visit_obje_examNote_new),
url(r'^visit/(?P<id>\d+)/obje/examNote/edit/(?P<examnoteid>\d+)/$', views.visit.visit_obje_examNote_edit),
#url(r'^visit/(?P<id>\d+)/obje/examNote/delete/(?P<examnoteid>\d+)/$', views.visit.visit_obje_examNote_delete),
url(r'^visit/(?P<id>\d+)/labs/$', views.visit.visit_labs),
url(r'^visit/(?P<id>\d+)/labs/new/(?P<labtypeid>\d+)/$', views.visit.visit_labs_new),
url(r'^visit/(?P<id>\d+)/plan/$', views.visit.visit_plan),
url(r'^visit/(?P<id>\d+)/plan/diag/new/$', views.visit.visit_plan_diag_new),
url(r'^visit/(?P<id>\d+)/plan/diag/new/(?P<dtid>\d+)/$', views.visit.visit_plan_diag_new_bytype),
url(r'^visit/(?P<id>\d+)/meds/$', views.visit.visit_meds),
url(r'^visit/(?P<id>\d+)/meds/new/(?P<did>\d+)/$', views.visit.visit_meds_new),
url(r'^visit/(?P<id>\d+)/refe/$', views.visit.visit_refe),
url(r'^visit/(?P<id>\d+)/refe/new/$', views.visit.visit_refe_new),
url(r'^visit/(?P<id>\d+)/refe/edit/(?P<refid>\d+)/$', views.visit.visit_refe_edit),
url(r'^visit/(?P<id>\d+)/note/$', views.visit.visit_note),
url(r'^visit/(?P<id>\d+)/collect/$', views.visit.visit_collect),
url(r'^visit/(?P<id>\d+)/bill_amount/$', views.visit.visit_bill_amount),
url(r'^visit/(?P<id>\d+)/cost_estimate_detail/$', views.visit.visit_cost_estimate_detail),
url(r'^visit/(?P<id>\d+)/resolve/$', views.visit.visit_resolve),
url(r'^visit/(?P<id>\d+)/unresolve/$', views.visit.visit_unresolve),
url(r'^visit/(?P<id>\d+)/record/(?P<type>\w+)/$', views.visit.visit_record),
url(r'^visit/(?P<id>\d+)/preg/$', views.pregnancy.pregnancy),
url(r'^visit/(?P<id>\d+)/preg/new/$', views.pregnancy.pregnancy_new),
url(r'^visit/(?P<id>\d+)/preg/edit/(?P<pregid>\d+)/$', views.pregnancy.pregnancy_edit),
url(r'^lab_queue/$', views.lab.lab_queue),
url(r'^lab/(?P<id>\d+)/start/$', views.lab.lab_start),
url(r'^lab/(?P<id>\d+)/cancel/$', views.lab.lab_cancel),
url(r'^lab/(?P<id>\d+)/fail/$', views.lab.lab_fail),
url(r'^lab/(?P<id>\d+)/complete/$', views.lab.lab_complete),
url(r'^lab/(?P<id>\d+)/reorder/$', views.lab.lab_reorder),
url(r'^lab/(?P<id>\d+)/notate/$', views.lab.lab_notate),
url(r'^diag/(?P<id>\d+)/stat_change/(?P<newstat>\w+)/$', views.diag.diag_stat_change),
url(r'^diag/(?P<id>\d+)/edit/notes/$', views.diag.diag_edit_notes),
url(r'^diag/(?P<id>\d+)/history/$', views.diag.diag_history),
url(r'^diag/patienttypehistory/(?P<pid>\d+)/(?P<dtid>\d+)/$', views.diag.diag_patienttypehistory),
url(r'^diag/(?P<id>\d+)/delete/$', views.diag.diag_delete),
url(r'^med_queue/$', views.med.med_queue),
url(r'^meds/(?P<vid>\d+)/$', views.med.meds_view),
url(r'^med/(?P<id>\d+)/dispense/$', views.med.med_dispense),
url(r'^med/(?P<id>\d+)/undo_dispense/$', views.med.med_undo_dispense),
url(r'^med/(?P<id>\d+)/substitute/$', views.med.med_substitute),
url(r'^med/(?P<id>\d+)/cancel/$', views.med.med_cancel),
url(r'^med/(?P<id>\d+)/undo_cancel/$', views.med.med_undo_cancel),
url(r'^med/(?P<id>\d+)/notate/$', views.med.med_notate),
url(r'^med/(?P<id>\d+)/edit/$', views.med.med_edit),
url(r'^reports/$', views.reports.index),
url(r'^reports/legacy/patient/daily/$', views.reports.legacy_patient_daily),
url(r'^reports/diagnosis_patient/$', views.reports.diagnosis_patient),
url(r'^reports/diagnosis/tally/$', views.reports.diagnosis_tally),
url(r'^reports/clinician/tally/$', views.reports.clinician_tally),
url(r'^reports/lab/tally/$', views.reports.lab_tally),
url(r'^reports/med/tally/$', views.reports.med_tally),
url(r'^reports/village/tally/$', views.reports.village_tally),
url(r'^reports/cashflow/$', views.reports.cashflow),
url(r'^reports/accounts_outstanding/$', views.reports.accounts_outstanding),
url(r'^reports/hmis105/$', views.reports.hmis105),
url(r'^graphs/test_matplotlib/$', views.graphs.test_matplotlib),
url(r'^graphs/vitals/(?P<id>\d+)/bp.png$', views.graphs.vitals_bp),
url(r'^graphs/vitals/(?P<id>\d+)/temp.png$', views.graphs.vitals_temp),
url(r'^graphs/vitals/(?P<id>\d+)/hrrr.png$', views.graphs.vitals_hrrr),
url(r'^graphs/vitals/(?P<id>\d+)/hw.png$', views.graphs.vitals_height_weight),
url(r'^graphs/vitals/(?P<id>\d+)/spo2o2.png$', views.graphs.vitals_spo2_o2),
url(r'^graphs/vitals/(?P<id>\d+)/$', views.graphs.vitals_graphs_index),
]
| gpl-3.0 |
aopp/android_kernel_asus_grouper | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
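# Illustrative test lines (editor's addition): an assumption derived from the
# parser below, not copied from a real .tst file. Each line carries four
# colon-separated fields -- command (c/t/w), opcode, thread id, data:
#
#   C: schedfifo: 0: 80
#   C: lock:      0: 0
#   W: locked:    0: 0
#   C: unlock:    0: 0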
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
tfeagle/mitmproxy | examples/mitmproxywrapper.py | 37 | 5810 | #!/usr/bin/env python
#
# Helper tool to enable/disable OS X proxy and wrap mitmproxy
#
# Get usage information with:
#
# mitmproxywrapper.py -h
#
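# Example invocations (editor's addition; flags match the argparse setup below):
#
#   mitmproxywrapper.py              # enable the system proxy and run mitmproxy on port 8080
#   mitmproxywrapper.py -p 9090      # listen on a non-default port
#   mitmproxywrapper.py -t           # just toggle the OS X proxy configuration
#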
import subprocess
import re
import argparse
import contextlib
import os
import sys
class Wrapper(object):
def __init__(self, port, extra_arguments=None):
self.port = port
self.extra_arguments = extra_arguments
def run_networksetup_command(self, *arguments):
return subprocess.check_output(
['sudo', 'networksetup'] + list(arguments))
def proxy_state_for_service(self, service):
state = self.run_networksetup_command(
'-getwebproxy',
service).splitlines()
return dict([re.findall(r'([^:]+): (.*)', line)[0] for line in state])
def enable_proxy_for_service(self, service):
print('Enabling proxy on {}...'.format(service))
for subcommand in ['-setwebproxy', '-setsecurewebproxy']:
self.run_networksetup_command(
subcommand, service, '127.0.0.1', str(
self.port))
def disable_proxy_for_service(self, service):
print('Disabling proxy on {}...'.format(service))
for subcommand in ['-setwebproxystate', '-setsecurewebproxystate']:
self.run_networksetup_command(subcommand, service, 'Off')
def interface_name_to_service_name_map(self):
order = self.run_networksetup_command('-listnetworkserviceorder')
mapping = re.findall(
r'\(\d+\)\s(.*)$\n\(.*Device: (.+)\)$',
order,
re.MULTILINE)
return dict([(b, a) for (a, b) in mapping])
def run_command_with_input(self, command, input):
popen = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
(stdout, stderr) = popen.communicate(input)
return stdout
def primary_interace_name(self):
scutil_script = 'get State:/Network/Global/IPv4\nd.show\n'
stdout = self.run_command_with_input('/usr/sbin/scutil', scutil_script)
interface, = re.findall(r'PrimaryInterface\s*:\s*(.+)', stdout)
return interface
def primary_service_name(self):
return self.interface_name_to_service_name_map()[
self.primary_interace_name()]
def proxy_enabled_for_service(self, service):
return self.proxy_state_for_service(service)['Enabled'] == 'Yes'
def toggle_proxy(self):
new_state = not self.proxy_enabled_for_service(
self.primary_service_name())
for service_name in self.connected_service_names():
if self.proxy_enabled_for_service(service_name) and not new_state:
self.disable_proxy_for_service(service_name)
elif not self.proxy_enabled_for_service(service_name) and new_state:
self.enable_proxy_for_service(service_name)
def connected_service_names(self):
scutil_script = 'list\n'
stdout = self.run_command_with_input('/usr/sbin/scutil', scutil_script)
service_ids = re.findall(r'State:/Network/Service/(.+)/IPv4', stdout)
service_names = []
for service_id in service_ids:
scutil_script = 'show Setup:/Network/Service/{}\n'.format(
service_id)
stdout = self.run_command_with_input(
'/usr/sbin/scutil',
scutil_script)
service_name, = re.findall(r'UserDefinedName\s*:\s*(.+)', stdout)
service_names.append(service_name)
return service_names
def wrap_mitmproxy(self):
with self.wrap_proxy():
cmd = ['mitmproxy', '-p', str(self.port)]
if self.extra_arguments:
cmd.extend(self.extra_arguments)
subprocess.check_call(cmd)
def wrap_honeyproxy(self):
with self.wrap_proxy():
popen = subprocess.Popen('honeyproxy.sh')
try:
popen.wait()
except KeyboardInterrupt:
popen.terminate()
@contextlib.contextmanager
def wrap_proxy(self):
connected_service_names = self.connected_service_names()
for service_name in connected_service_names:
if not self.proxy_enabled_for_service(service_name):
self.enable_proxy_for_service(service_name)
yield
for service_name in connected_service_names:
if self.proxy_enabled_for_service(service_name):
self.disable_proxy_for_service(service_name)
@classmethod
def ensure_superuser(cls):
if os.getuid() != 0:
print('Relaunching with sudo...')
os.execv('/usr/bin/sudo', ['/usr/bin/sudo'] + sys.argv)
@classmethod
def main(cls):
parser = argparse.ArgumentParser(
description='Helper tool for OS X proxy configuration and mitmproxy.',
epilog='Any additional arguments will be passed on unchanged to mitmproxy.')
parser.add_argument(
'-t',
'--toggle',
action='store_true',
help='just toggle the proxy configuration')
# parser.add_argument('--honeyproxy', action='store_true', help='run honeyproxy instead of mitmproxy')
parser.add_argument(
'-p',
'--port',
type=int,
help='override the default port of 8080',
default=8080)
args, extra_arguments = parser.parse_known_args()
wrapper = cls(port=args.port, extra_arguments=extra_arguments)
if args.toggle:
wrapper.toggle_proxy()
# elif args.honeyproxy:
# wrapper.wrap_honeyproxy()
else:
wrapper.wrap_mitmproxy()
if __name__ == '__main__':
Wrapper.ensure_superuser()
Wrapper.main()
| mit |
fredkingham/blog-of-fred | django/contrib/gis/db/models/sql/query.py | 379 | 5314 | from django.db import connections
from django.db.models.query import sql
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import aggregates as gis_aggregates
from django.contrib.gis.db.models.sql.conversion import AreaField, DistanceField, GeomField
from django.contrib.gis.db.models.sql.where import GeoWhereNode
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
ALL_TERMS = dict([(x, None) for x in (
'bbcontains', 'bboverlaps', 'contained', 'contains',
'contains_properly', 'coveredby', 'covers', 'crosses', 'disjoint',
'distance_gt', 'distance_gte', 'distance_lt', 'distance_lte',
'dwithin', 'equals', 'exact',
'intersects', 'overlaps', 'relate', 'same_as', 'touches', 'within',
'left', 'right', 'overlaps_left', 'overlaps_right',
'overlaps_above', 'overlaps_below',
'strictly_above', 'strictly_below'
)])
ALL_TERMS.update(sql.constants.QUERY_TERMS)
class GeoQuery(sql.Query):
"""
A single spatial SQL query.
"""
    # Overriding the valid query terms.
query_terms = ALL_TERMS
aggregates_module = gis_aggregates
compiler = 'GeoSQLCompiler'
#### Methods overridden from the base Query class ####
def __init__(self, model, where=GeoWhereNode):
super(GeoQuery, self).__init__(model, where)
# The following attributes are customized for the GeoQuerySet.
# The GeoWhereNode and SpatialBackend classes contain backend-specific
# routines and functions.
self.custom_select = {}
self.transformed_srid = None
self.extra_select_fields = {}
def clone(self, *args, **kwargs):
obj = super(GeoQuery, self).clone(*args, **kwargs)
# Customized selection dictionary and transformed srid flag have
# to also be added to obj.
obj.custom_select = self.custom_select.copy()
obj.transformed_srid = self.transformed_srid
obj.extra_select_fields = self.extra_select_fields.copy()
return obj
def convert_values(self, value, field, connection):
"""
Using the same routines that Oracle does we can convert our
extra selection objects into Geometry and Distance objects.
TODO: Make converted objects 'lazy' for less overhead.
"""
if connection.ops.oracle:
# Running through Oracle's first.
value = super(GeoQuery, self).convert_values(value, field or GeomField(), connection)
if value is None:
# Output from spatial function is NULL (e.g., called
# function on a geometry field with NULL value).
pass
elif isinstance(field, DistanceField):
# Using the field's distance attribute, can instantiate
# `Distance` with the right context.
value = Distance(**{field.distance_att : value})
elif isinstance(field, AreaField):
value = Area(**{field.area_att : value})
elif isinstance(field, (GeomField, GeometryField)) and value:
value = Geometry(value)
return value
def get_aggregation(self, using):
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
connection = connections[using]
for alias, aggregate in self.aggregate_select.items():
if isinstance(aggregate, gis_aggregates.GeoAggregate):
if not getattr(aggregate, 'is_extent', False) or connection.ops.oracle:
self.extra_select_fields[alias] = GeomField()
return super(GeoQuery, self).get_aggregation(using)
def resolve_aggregate(self, value, aggregate, connection):
"""
Overridden from GeoQuery's normalize to handle the conversion of
GeoAggregate objects.
"""
if isinstance(aggregate, self.aggregates_module.GeoAggregate):
if aggregate.is_extent:
if aggregate.is_extent == '3D':
return connection.ops.convert_extent3d(value)
else:
return connection.ops.convert_extent(value)
else:
return connection.ops.convert_geom(value, aggregate.source)
else:
return super(GeoQuery, self).resolve_aggregate(value, aggregate, connection)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
"""
Returns the first Geometry field encountered; or specified via the
`field_name` keyword. The `field_name` may be a string specifying
the geometry field on this GeoQuery's model, or a lookup string
to a geometry field via a ForeignKey relation.
"""
if field_name is None:
# Incrementing until the first geographic field is found.
for fld in self.model._meta.fields:
if isinstance(fld, GeometryField): return fld
return False
else:
# Otherwise, check by the given field name -- which may be
# a lookup to a _related_ geographic field.
return GeoWhereNode._check_geo_field(self.model._meta, field_name)
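# --- Illustrative lookups (editor's addition, not part of Django) ---
# The spatial lookup keywords accepted by GeoQuery come from ALL_TERMS above;
# the model and field names in these sketches are assumptions for illustration:
#
#   City.objects.filter(point__distance_lte=(pnt, D(km=7)))
#   City.objects.filter(poly__contains=pnt)
#   City.objects.filter(poly__bboverlaps=geom)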
| bsd-3-clause |
KohlsTechnology/ansible | lib/ansible/plugins/cache/mongodb.py | 31 | 5534 | # (c) 2018, Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
cache: mongodb
short_description: Use MongoDB for caching
description:
- This cache uses per host records saved in MongoDB.
version_added: "2.5"
requirements:
- pymongo>=3
options:
_uri:
description:
- MongoDB Connection String URI
required: False
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the DB entries
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
- section: defaults
_timeout:
default: 86400
description: Expiration timeout in seconds for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
'''
import datetime
from contextlib import contextmanager
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.cache import BaseCacheModule
try:
import pymongo
except ImportError:
raise AnsibleError("The 'pymongo' python module is required for the mongodb fact cache, 'pip install pymongo>=3.0'")
class CacheModule(BaseCacheModule):
"""
A caching module backed by mongodb.
"""
def __init__(self, *args, **kwargs):
self._timeout = int(C.CACHE_PLUGIN_TIMEOUT)
self._prefix = C.CACHE_PLUGIN_PREFIX
self._cache = {}
self._managed_indexes = False
def _manage_indexes(self, collection):
'''
This function manages indexes on the mongo collection.
We only do this once, at run time based on _managed_indexes,
rather than per connection instantiation as that would be overkill
'''
_timeout = self._timeout
if _timeout and _timeout > 0:
try:
collection.create_index(
'date',
name='ttl',
expireAfterSeconds=_timeout
)
except pymongo.errors.OperationFailure:
# We make it here when the fact_caching_timeout was set to a different value between runs
collection.drop_index('ttl')
return self._manage_indexes(collection)
else:
collection.drop_index('ttl')
@contextmanager
def _collection(self):
'''
This is a context manager for opening and closing mongo connections as needed. This exists as to not create a global
connection, due to pymongo not being fork safe (http://api.mongodb.com/python/current/faq.html#is-pymongo-fork-safe)
'''
mongo = pymongo.MongoClient(C.CACHE_PLUGIN_CONNECTION)
try:
db = mongo.get_default_database()
except pymongo.errors.ConfigurationError:
# We'll fall back to using ``ansible`` as the database if one was not provided
# in the MongoDB Connection String URI
db = mongo['ansible']
# The collection is hard coded as ``cache``, there are no configuration options for this
collection = db['cache']
if not self._managed_indexes:
# Only manage the indexes once per run, not per connection
self._manage_indexes(collection)
self._managed_indexes = True
yield collection
mongo.close()
def _make_key(self, key):
return '%s%s' % (self._prefix, key)
def get(self, key):
if key not in self._cache:
with self._collection() as collection:
value = collection.find_one({'_id': self._make_key(key)})
self._cache[key] = value['data']
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
with self._collection() as collection:
collection.update_one(
{'_id': self._make_key(key)},
{
'$set': {
'_id': self._make_key(key),
'data': value,
'date': datetime.datetime.utcnow()
}
},
upsert=True
)
def keys(self):
with self._collection() as collection:
return [doc['_id'] for doc in collection.find({}, {'_id': True})]
def contains(self, key):
with self._collection() as collection:
return bool(collection.count({'_id': self._make_key(key)}))
def delete(self, key):
del self._cache[key]
with self._collection() as collection:
collection.delete_one({'_id': self._make_key(key)})
def flush(self):
with self._collection() as collection:
collection.delete_many({})
def copy(self):
with self._collection() as collection:
return dict((d['_id'], d['data']) for d in collection.find({}))
def __getstate__(self):
return dict()
def __setstate__(self, data):
self.__init__()
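# --- Illustrative configuration sketch (editor's addition, not part of the plugin) ---
# A minimal ansible.cfg fragment that would select this cache plugin; the URI,
# prefix and timeout values below are assumptions for illustration only:
#
#   [defaults]
#   fact_caching = mongodb
#   fact_caching_connection = mongodb://localhost:27017/ansible
#   fact_caching_prefix = ansible_facts_
#   fact_caching_timeout = 3600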
| gpl-3.0 |
xindus40223115/w17_test | static/Brython3.1.1-20150328-091302/Lib/xml/dom/pulldom.py | 850 | 11761 | import xml.sax
import xml.sax.handler
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
_locator = None
document = None
def __init__(self, documentFactory=None):
from xml.dom import XML_NAMESPACE
self.documentFactory = documentFactory
self.firstEvent = [None, None]
self.lastEvent = self.firstEvent
self.elementStack = []
self.push = self.elementStack.append
try:
self.pop = self.elementStack.pop
except AttributeError:
# use class' pop instead
pass
self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self.pending_events = []
def pop(self):
result = self.elementStack[-1]
del self.elementStack[-1]
return result
def setDocumentLocator(self, locator):
self._locator = locator
def startPrefixMapping(self, prefix, uri):
if not hasattr(self, '_xmlns_attrs'):
self._xmlns_attrs = []
self._xmlns_attrs.append((prefix or 'xmlns', uri))
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix or None
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts.pop()
def startElementNS(self, name, tagName , attrs):
# Retrieve xml namespace declaration attributes.
xmlns_uri = 'http://www.w3.org/2000/xmlns/'
xmlns_attrs = getattr(self, '_xmlns_attrs', None)
if xmlns_attrs is not None:
for aname, value in xmlns_attrs:
attrs._attrs[(xmlns_uri, aname)] = value
self._xmlns_attrs = []
uri, localname = name
if uri:
# When using namespaces, the reader may or may not
# provide us with the original name. If not, create
# *a* valid tagName from the current context.
if tagName is None:
prefix = self._current_context[uri]
if prefix:
tagName = prefix + ":" + localname
else:
tagName = localname
if self.document:
node = self.document.createElementNS(uri, tagName)
else:
node = self.buildDocument(uri, tagName)
else:
# When the tagname is not prefixed, it just appears as
# localname
if self.document:
node = self.document.createElement(localname)
else:
node = self.buildDocument(None, localname)
for aname,value in attrs.items():
a_uri, a_localname = aname
if a_uri == xmlns_uri:
if a_localname == 'xmlns':
qname = a_localname
else:
qname = 'xmlns:' + a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
elif a_uri:
prefix = self._current_context[a_uri]
if prefix:
qname = prefix + ":" + a_localname
else:
qname = a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
else:
attr = self.document.createAttribute(a_localname)
node.setAttributeNode(attr)
attr.value = value
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElementNS(self, name, tagName):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def startElement(self, name, attrs):
if self.document:
node = self.document.createElement(name)
else:
node = self.buildDocument(None, name)
for aname,value in attrs.items():
attr = self.document.createAttribute(aname)
attr.value = value
node.setAttributeNode(attr)
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElement(self, name):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def comment(self, s):
if self.document:
node = self.document.createComment(s)
self.lastEvent[1] = [(COMMENT, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(COMMENT, s), None]
self.pending_events.append(event)
def processingInstruction(self, target, data):
if self.document:
node = self.document.createProcessingInstruction(target, data)
self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(PROCESSING_INSTRUCTION, target, data), None]
self.pending_events.append(event)
def ignorableWhitespace(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
self.lastEvent = self.lastEvent[1]
def characters(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(CHARACTERS, node), None]
self.lastEvent = self.lastEvent[1]
def startDocument(self):
if self.documentFactory is None:
import xml.dom.minidom
self.documentFactory = xml.dom.minidom.Document.implementation
def buildDocument(self, uri, tagname):
# Can't do that in startDocument, since we need the tagname
# XXX: obtain DocumentType
node = self.documentFactory.createDocument(uri, tagname, None)
self.document = node
self.lastEvent[1] = [(START_DOCUMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
# Put everything we have seen so far into the document
for e in self.pending_events:
if e[0][0] == PROCESSING_INSTRUCTION:
_,target,data = e[0]
n = self.document.createProcessingInstruction(target, data)
e[0] = (PROCESSING_INSTRUCTION, n)
elif e[0][0] == COMMENT:
n = self.document.createComment(e[0][1])
e[0] = (COMMENT, n)
else:
raise AssertionError("Unknown pending event ",e[0][0])
self.lastEvent[1] = e
self.lastEvent = e
self.pending_events = None
return node.firstChild
def endDocument(self):
self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
self.pop()
def clear(self):
"clear(): Explicitly release parsing structures"
self.document = None
class ErrorHandler:
def warning(self, exception):
print(exception)
def error(self, exception):
raise exception
def fatalError(self, exception):
raise exception
class DOMEventStream:
def __init__(self, stream, parser, bufsize):
self.stream = stream
self.parser = parser
self.bufsize = bufsize
if not hasattr(self.parser, 'feed'):
self.getEvent = self._slurp
self.reset()
def reset(self):
self.pulldom = PullDOM()
# This content handler relies on namespace support
self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
self.parser.setContentHandler(self.pulldom)
def __getitem__(self, pos):
rc = self.getEvent()
if rc:
return rc
raise IndexError
def __next__(self):
rc = self.getEvent()
if rc:
return rc
raise StopIteration
def __iter__(self):
return self
def expandNode(self, node):
event = self.getEvent()
parents = [node]
while event:
token, cur_node = event
if cur_node is node:
return
if token != END_ELEMENT:
parents[-1].appendChild(cur_node)
if token == START_ELEMENT:
parents.append(cur_node)
elif token == END_ELEMENT:
del parents[-1]
event = self.getEvent()
def getEvent(self):
# use IncrementalParser interface, so we get the desired
# pull effect
if not self.pulldom.firstEvent[1]:
self.pulldom.lastEvent = self.pulldom.firstEvent
while not self.pulldom.firstEvent[1]:
buf = self.stream.read(self.bufsize)
if not buf:
self.parser.close()
return None
self.parser.feed(buf)
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def _slurp(self):
""" Fallback replacement for getEvent() using the
standard SAX2 interface, which means we slurp the
SAX events into memory (no performance gain, but
            we are compatible with all SAX parsers).
"""
self.parser.parse(self.stream)
self.getEvent = self._emit
return self._emit()
def _emit(self):
""" Fallback replacement for getEvent() that emits
the events that _slurp() read previously.
"""
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def clear(self):
"""clear(): Explicitly release parsing objects"""
self.pulldom.clear()
del self.pulldom
self.parser = None
self.stream = None
class SAX2DOM(PullDOM):
def startElementNS(self, name, tagName , attrs):
PullDOM.startElementNS(self, name, tagName, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def startElement(self, name, attrs):
PullDOM.startElement(self, name, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def processingInstruction(self, target, data):
PullDOM.processingInstruction(self, target, data)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def ignorableWhitespace(self, chars):
PullDOM.ignorableWhitespace(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def characters(self, chars):
PullDOM.characters(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
if bufsize is None:
bufsize = default_bufsize
if isinstance(stream_or_string, str):
stream = open(stream_or_string, 'rb')
else:
stream = stream_or_string
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
from io import StringIO
bufsize = len(string)
buf = StringIO(string)
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(buf, parser, bufsize)
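# --- Illustrative usage sketch (editor's addition, not part of this module) ---
# Pull events from a small in-memory document and expand one element into a
# full DOM subtree; the XML snippet is an assumption chosen for illustration.
if __name__ == '__main__':
    _stream = parseString('<items><item id="1">hello</item></items>')
    for _event, _node in _stream:
        if _event == START_ELEMENT and _node.tagName == 'item':
            _stream.expandNode(_node)
            print(_node.toxml())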
| gpl-3.0 |
cjaymes/pyscap | src/scap/model/oval_5/defs/windows/UserSidObjectElement.py | 1 | 1032 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.defs.windows.ObjectType import ObjectType
logger = logging.getLogger(__name__)
class UserSidObjectElement(ObjectType):
MODEL_MAP = {
'tag_name': 'user_sid_object',
'elements': [
{'tag_name': 'user', 'class': 'scap.model.oval_5.defs.EntityObjectType', 'min': 0},
],
}
| gpl-3.0 |
alvaroaleman/ansible | lib/ansible/parsing/utils/addresses.py | 123 | 8158 | # Copyright 2015 Abhijit Menon-Sen <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.errors import AnsibleParserError, AnsibleError
# Components that match a numeric or alphanumeric begin:end or begin:end:step
# range expression inside square brackets.
numeric_range = r'''
\[
(?:[0-9]+:[0-9]+) # numeric begin:end
(?::[0-9]+)? # numeric :step (optional)
\]
'''
hexadecimal_range = r'''
\[
(?:[0-9a-f]+:[0-9a-f]+) # hexadecimal begin:end
(?::[0-9]+)? # numeric :step (optional)
\]
'''
alphanumeric_range = r'''
\[
(?:
[a-z]:[a-z]| # one-char alphabetic range
[0-9]+:[0-9]+ # ...or a numeric one
)
(?::[0-9]+)? # numeric :step (optional)
\]
'''
# Components that match a 16-bit portion of an IPv6 address in hexadecimal
# notation (0..ffff) or an 8-bit portion of an IPv4 address in decimal notation
# (0..255) or an [x:y(:z)] numeric range.
ipv6_component = r'''
(?:
[0-9a-f]{{1,4}}| # 0..ffff
{range} # or a numeric range
)
'''.format(range=hexadecimal_range)
ipv4_component = r'''
(?:
[01]?[0-9]{{1,2}}| # 0..199
2[0-4][0-9]| # 200..249
25[0-5]| # 250..255
{range} # or a numeric range
)
'''.format(range=numeric_range)
# A hostname label, e.g. 'foo' in 'foo.example.com'. Consists of alphanumeric
# characters plus dashes (and underscores) or valid ranges. The label may not
# start or end with a hyphen or an underscore. This is interpolated into the
# hostname pattern below. We don't try to enforce the 63-char length limit.
label = r'''
(?:[\w]|{range}) # Starts with an alphanumeric or a range
(?:[\w_-]|{range})* # Then zero or more of the same or [_-]
(?<![_-]) # ...as long as it didn't end with [_-]
'''.format(range=alphanumeric_range)
patterns = {
# This matches a square-bracketed expression with a port specification. What
# is inside the square brackets is validated later.
'bracketed_hostport': re.compile(
r'''^
\[(.+)\] # [host identifier]
:([0-9]+) # :port number
$
''', re.X
),
# This matches a bare IPv4 address or hostname (or host pattern including
# [x:y(:z)] ranges) with a port specification.
'hostport': re.compile(
r'''^
((?: # We want to match:
[^:\[\]] # (a non-range character
| # ...or...
\[[^\]]*\] # a complete bracketed expression)
)*) # repeated as many times as possible
:([0-9]+) # followed by a port number
$
''', re.X
),
# This matches an IPv4 address, but also permits range expressions.
'ipv4': re.compile(
r'''^
(?:{i4}\.){{3}}{i4} # Three parts followed by dots plus one
$
'''.format(i4=ipv4_component), re.X|re.I
),
# This matches an IPv6 address, but also permits range expressions.
#
# This expression looks complex, but it really only spells out the various
# combinations in which the basic unit of an IPv6 address (0..ffff) can be
# written, from :: to 1:2:3:4:5:6:7:8, plus the IPv4-in-IPv6 variants such
# as ::ffff:192.0.2.3.
#
# Note that we can't just use ipaddress.ip_address() because we also have to
# accept ranges in place of each component.
'ipv6': re.compile(
r'''^
(?:{0}:){{7}}{0}| # uncompressed: 1:2:3:4:5:6:7:8
(?:{0}:){{1,6}}:| # compressed variants, which are all
(?:{0}:)(?::{0}){{1,6}}| # a::b for various lengths of a,b
(?:{0}:){{2}}(?::{0}){{1,5}}|
(?:{0}:){{3}}(?::{0}){{1,4}}|
(?:{0}:){{4}}(?::{0}){{1,3}}|
(?:{0}:){{5}}(?::{0}){{1,2}}|
(?:{0}:){{6}}(?::{0})| # ...all with 2 <= a+b <= 7
:(?::{0}){{1,6}}| # ::ffff(:ffff...)
{0}?::| # ffff::, ::
# ipv4-in-ipv6 variants
(?:0:){{6}}(?:{0}\.){{3}}{0}|
::(?:ffff:)?(?:{0}\.){{3}}{0}|
(?:0:){{5}}ffff:(?:{0}\.){{3}}{0}
$
'''.format(ipv6_component), re.X|re.I
),
# This matches a hostname or host pattern including [x:y(:z)] ranges.
#
# We roughly follow DNS rules here, but also allow ranges (and underscores).
# In the past, no systematic rules were enforced about inventory hostnames,
# but the parsing context (e.g. shlex.split(), fnmatch.fnmatch()) excluded
# various metacharacters anyway.
#
# We don't enforce DNS length restrictions here (63 characters per label,
# 253 characters total) or make any attempt to process IDNs.
'hostname': re.compile(
r'''^
{label} # We must have at least one label
(?:\.{label})* # Followed by zero or more .labels
$
'''.format(label=label), re.X|re.I|re.UNICODE
),
}
def parse_address(address, allow_ranges=False):
"""
Takes a string and returns a (host, port) tuple. If the host is None, then
the string could not be parsed as a host identifier with an optional port
specification. If the port is None, then no port was specified.
The host identifier may be a hostname (qualified or not), an IPv4 address,
or an IPv6 address. If allow_ranges is True, then any of those may contain
[x:y] range specifications, e.g. foo[1:3] or foo[0:5]-bar[x-z].
The port number is an optional :NN suffix on an IPv4 address or host name,
or a mandatory :NN suffix on any square-bracketed expression: IPv6 address,
IPv4 address, or host name. (This means the only way to specify a port for
an IPv6 address is to enclose it in square brackets.)
"""
# First, we extract the port number if one is specified.
port = None
for matching in ['bracketed_hostport', 'hostport']:
m = patterns[matching].match(address)
if m:
(address, port) = m.groups()
port = int(port)
continue
# What we're left with now must be an IPv4 or IPv6 address, possibly with
# numeric ranges, or a hostname with alphanumeric ranges.
host = None
for matching in ['ipv4', 'ipv6', 'hostname']:
m = patterns[matching].match(address)
if m:
host = address
continue
# If it isn't any of the above, we don't understand it.
if not host:
raise AnsibleError("Not a valid network hostname: %s" % address)
# If we get to this point, we know that any included ranges are valid.
# If the caller is prepared to handle them, all is well.
# Otherwise we treat it as a parse failure.
if not allow_ranges and '[' in host:
raise AnsibleParserError("Detected range in host but was asked to ignore ranges")
return (host, port)
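# --- Illustrative results (editor's addition, not part of Ansible) ---
# Expected behaviour of parse_address(); the sample addresses are assumptions
# chosen for illustration only:
#
#   parse_address('192.0.2.3')                                 -> ('192.0.2.3', None)
#   parse_address('192.0.2.3:22')                              -> ('192.0.2.3', 22)
#   parse_address('[2001:db8::1]:22')                          -> ('2001:db8::1', 22)
#   parse_address('host[1:3].example.com', allow_ranges=True)  -> ('host[1:3].example.com', None)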
| gpl-3.0 |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/cloud/__init__.py | 95 | 1145 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for cloud ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=line-too-long,wildcard-import
from tensorflow.contrib.cloud.python.ops.bigquery_reader_ops import *
# pylint: enable=line-too-long,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['BigQueryReader']
remove_undocumented(__name__, _allowed_symbols)
| mit |
uwdata/termite-stm | web2py/gluon/utf8.py | 16 | 30039 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <[email protected]>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Created by Vladyslav Kozlovskyy (Ukraine) <dbdevelop©gmail.com>
for Web2py project
Utilities and class for UTF8 strings managing
===========================================
"""
import __builtin__
__all__ = ['Utf8']
repr_escape_tab = {}
for i in range(1, 32):
repr_escape_tab[i] = ur'\x%02x' % i
repr_escape_tab[7] = u'\\a'
repr_escape_tab[8] = u'\\b'
repr_escape_tab[9] = u'\\t'
repr_escape_tab[10] = u'\\n'
repr_escape_tab[11] = u'\\v'
repr_escape_tab[12] = u'\\f'
repr_escape_tab[13] = u'\\r'
repr_escape_tab[ord('\\')] = u'\\\\'
repr_escape_tab2 = repr_escape_tab.copy()
repr_escape_tab2[ord('\'')] = u"\\'"
def sort_key(s):
""" Unicode Collation Algorithm (UCA) (http://www.unicode.org/reports/tr10/)
    is used for sorting utf-8 and unicode strings and for comparing
    utf-8 strings
    NOTE: pyuca is a very memory-hungry module! It loads the whole
          "allkey.txt" file (~2mb!) into memory. But this
functionality is needed only when sort_key() is called as a
part of sort() function or when Utf8 strings are compared.
So, it is a lazy "sort_key" function which (ONLY ONCE, ON ITS
FIRST CALL) imports pyuca and replaces itself with a real
sort_key() function
"""
global sort_key
try:
from gluon.contrib.pyuca import unicode_collator
unicode_sort_key = unicode_collator.sort_key
sort_key = lambda s: unicode_sort_key(
unicode(s, 'utf-8') if isinstance(s, str) else s)
except:
sort_key = lambda s: (
unicode(s, 'utf-8') if isinstance(s, str) else s).lower()
return sort_key(s)
def ord(char):
""" returns unicode id for utf8 or unicode *char* character
SUPPOSE that *char* is an utf-8 or unicode character only
"""
if isinstance(char, unicode):
return __builtin__.ord(char)
return __builtin__.ord(unicode(char, 'utf-8'))
def chr(code):
""" return utf8-character with *code* unicode id """
return Utf8(unichr(code))
def size(string):
""" return length of utf-8 string in bytes
        NOTE! For a unicode string, the length of the
              corresponding utf-8 string is returned
"""
return Utf8(string).__size__()
def truncate(string, length, dots='...'):
""" returns string of length < *length* or truncate
string with adding *dots* suffix to the string's end
args:
length (int): max length of string
dots (str or unicode): string suffix, when string is cutted
returns:
(utf8-str): original or cutted string
"""
text = unicode(string, 'utf-8')
dots = unicode(dots, 'utf-8') if isinstance(dots, str) else dots
if len(text) > length:
text = text[:length - len(dots)] + dots
return str.__new__(Utf8, text.encode('utf-8'))
class Utf8(str):
"""
Class for utf8 string storing and manipulations
The base presupposition of this class usage is:
"ALL strings in the application are either of
utf-8 or unicode type, even when simple str
type is used. UTF-8 is only a "packed" version
of unicode, so Utf-8 and unicode strings are
interchangeable."
CAUTION! This class is slower than str/unicode!
Do NOT use it inside intensive loops. Simply
decode string(s) to unicode before loop and
encode it back to utf-8 string(s) after
intensive calculation.
You can see the benefit of this class in doctests() below
"""
def __new__(cls, content='', codepage='utf-8'):
if isinstance(content, unicode):
return str.__new__(cls, unicode.encode(content, 'utf-8'))
elif codepage in ('utf-8', 'utf8') or isinstance(content, cls):
return str.__new__(cls, content)
else:
return str.__new__(cls, unicode(content, codepage).encode('utf-8'))
def __repr__(self):
r''' # note that we use raw strings to avoid having to use double back slashes below
NOTE! This function is a clone of web2py:gluon.languages.utf_repl() function
utf8.__repr__() works same as str.repr() when processing ascii string
>>> repr(Utf8('abc')) == repr(Utf8("abc")) == repr('abc') == repr("abc") == "'abc'"
True
>>> repr(Utf8('a"b"c')) == repr('a"b"c') == '\'a"b"c\''
True
>>> repr(Utf8("a'b'c")) == repr("a'b'c") == '"a\'b\'c"'
True
>>> repr(Utf8('a\'b"c')) == repr('a\'b"c') == repr(Utf8("a'b\"c")) == repr("a'b\"c") == '\'a\\\'b"c\''
True
>>> repr(Utf8('a\r\nb')) == repr('a\r\nb') == "'a\\r\\nb'" # Test for \r, \n
True
Unlike str.repr(), Utf8.__repr__() remains utf8 content when processing utf8 string
>>> repr(Utf8('中文字')) == repr(Utf8("中文字")) == "'中文字'" != repr('中文字')
True
>>> repr(Utf8('中"文"字')) == "'中\"文\"字'" != repr('中"文"字')
True
>>> repr(Utf8("中'文'字")) == '"中\'文\'字"' != repr("中'文'字")
True
>>> repr(Utf8('中\'文"字')) == repr(Utf8("中'文\"字")) == '\'中\\\'文"字\'' != repr('中\'文"字') == repr("中'文\"字")
True
>>> repr(Utf8('中\r\n文')) == "'中\\r\\n文'" != repr('中\r\n文') # Test for \r, \n
True
'''
if str.find(self, "'") >= 0 and str.find(self, '"') < 0: # only single quote exists
return '"' + unicode(self, 'utf-8').translate(repr_escape_tab).encode('utf-8') + '"'
else:
return "'" + unicode(self, 'utf-8').translate(repr_escape_tab2).encode('utf-8') + "'"
def __size__(self):
""" length of utf-8 string in bytes """
return str.__len__(self)
def __contains__(self, other):
return str.__contains__(self, Utf8(other))
def __getitem__(self, index):
return str.__new__(Utf8, unicode(self, 'utf-8')[index].encode('utf-8'))
def __getslice__(self, begin, end):
return str.__new__(Utf8, unicode(self, 'utf-8')[begin:end].encode('utf-8'))
def __add__(self, other):
return str.__new__(Utf8, str.__add__(self, unicode.encode(other, 'utf-8')
if isinstance(other, unicode) else other))
def __len__(self):
return len(unicode(self, 'utf-8'))
def __mul__(self, integer):
return str.__new__(Utf8, str.__mul__(self, integer))
def __eq__(self, string):
return str.__eq__(self, Utf8(string))
def __ne__(self, string):
return str.__ne__(self, Utf8(string))
def capitalize(self):
return str.__new__(Utf8, unicode(self, 'utf-8').capitalize().encode('utf-8'))
def center(self, length):
return str.__new__(Utf8, unicode(self, 'utf-8').center(length).encode('utf-8'))
def upper(self):
return str.__new__(Utf8, unicode(self, 'utf-8').upper().encode('utf-8'))
def lower(self):
return str.__new__(Utf8, unicode(self, 'utf-8').lower().encode('utf-8'))
def title(self):
return str.__new__(Utf8, unicode(self, 'utf-8').title().encode('utf-8'))
def index(self, string):
return unicode(self, 'utf-8').index(string if isinstance(string, unicode) else unicode(string, 'utf-8'))
def isalnum(self):
return unicode(self, 'utf-8').isalnum()
def isalpha(self):
return unicode(self, 'utf-8').isalpha()
def isdigit(self):
return unicode(self, 'utf-8').isdigit()
def islower(self):
return unicode(self, 'utf-8').islower()
def isspace(self):
return unicode(self, 'utf-8').isspace()
def istitle(self):
return unicode(self, 'utf-8').istitle()
def isupper(self):
return unicode(self, 'utf-8').isupper()
def zfill(self, length):
return str.__new__(Utf8, unicode(self, 'utf-8').zfill(length).encode('utf-8'))
def join(self, iter):
return str.__new__(Utf8, str.join(self, [Utf8(c) for c in
list(unicode(iter, 'utf-8') if
isinstance(iter, str) else
iter)]))
def lstrip(self, chars=None):
return str.__new__(Utf8, str.lstrip(self, None if chars is None else Utf8(chars)))
def rstrip(self, chars=None):
return str.__new__(Utf8, str.rstrip(self, None if chars is None else Utf8(chars)))
def strip(self, chars=None):
return str.__new__(Utf8, str.strip(self, None if chars is None else Utf8(chars)))
def swapcase(self):
return str.__new__(Utf8, unicode(self, 'utf-8').swapcase().encode('utf-8'))
def count(self, sub, start=0, end=None):
unistr = unicode(self, 'utf-8')
return unistr.count(
unicode(sub, 'utf-8') if isinstance(sub, str) else sub,
start, len(unistr) if end is None else end)
def decode(self, encoding='utf-8', errors='strict'):
return str.decode(self, encoding, errors)
def encode(self, encoding, errors='strict'):
return unicode(self, 'utf-8').encode(encoding, errors)
def expandtabs(self, tabsize=8):
return str.__new__(Utf8, unicode(self, 'utf-8').expandtabs(tabsize).encode('utf-8'))
def find(self, sub, start=None, end=None):
return unicode(self, 'utf-8').find(unicode(sub, 'utf-8')
if isinstance(sub, str) else sub, start, end)
def ljust(self, width, fillchar=' '):
return str.__new__(Utf8, unicode(self, 'utf-8').ljust(width, unicode(fillchar, 'utf-8')
if isinstance(fillchar, str) else fillchar).encode('utf-8'))
def partition(self, sep):
(head, sep, tail) = str.partition(self, Utf8(sep))
return (str.__new__(Utf8, head),
str.__new__(Utf8, sep),
str.__new__(Utf8, tail))
def replace(self, old, new, count=-1):
return str.__new__(Utf8, str.replace(self, Utf8(old), Utf8(new), count))
def rfind(self, sub, start=None, end=None):
return unicode(self, 'utf-8').rfind(unicode(sub, 'utf-8')
if isinstance(sub, str) else sub, start, end)
def rindex(self, string):
return unicode(self, 'utf-8').rindex(string if isinstance(string, unicode)
else unicode(string, 'utf-8'))
def rjust(self, width, fillchar=' '):
return str.__new__(Utf8, unicode(self, 'utf-8').rjust(width, unicode(fillchar, 'utf-8')
if isinstance(fillchar, str) else fillchar).encode('utf-8'))
def rpartition(self, sep):
(head, sep, tail) = str.rpartition(self, Utf8(sep))
return (str.__new__(Utf8, head),
str.__new__(Utf8, sep),
str.__new__(Utf8, tail))
def rsplit(self, sep=None, maxsplit=-1):
return [str.__new__(Utf8, part) for part in str.rsplit(self,
None if sep is None else Utf8(sep), maxsplit)]
def split(self, sep=None, maxsplit=-1):
return [str.__new__(Utf8, part) for part in str.split(self,
None if sep is None else Utf8(sep), maxsplit)]
def splitlines(self, keepends=False):
return [str.__new__(Utf8, part) for part in str.splitlines(self, keepends)]
def startswith(self, prefix, start=0, end=None):
unistr = unicode(self, 'utf-8')
if isinstance(prefix, tuple):
prefix = tuple(unicode(
s, 'utf-8') if isinstance(s, str) else s for s in prefix)
elif isinstance(prefix, str):
prefix = unicode(prefix, 'utf-8')
return unistr.startswith(prefix, start, len(unistr) if end is None else end)
def translate(self, table, deletechars=''):
if isinstance(table, dict):
return str.__new__(Utf8, unicode(self, 'utf-8').translate(table).encode('utf-8'))
else:
return str.__new__(Utf8, str.translate(self, table, deletechars))
def endswith(self, prefix, start=0, end=None):
unistr = unicode(self, 'utf-8')
if isinstance(prefix, tuple):
prefix = tuple(unicode(
s, 'utf-8') if isinstance(s, str) else s for s in prefix)
elif isinstance(prefix, str):
prefix = unicode(prefix, 'utf-8')
return unistr.endswith(prefix, start, len(unistr) if end is None else end)
if hasattr(str, 'format'): # Python 2.5 hasn't got str.format() method
def format(self, *args, **kwargs):
args = [unicode(
s, 'utf-8') if isinstance(s, str) else s for s in args]
kwargs = dict((unicode(k, 'utf-8') if isinstance(k, str) else k,
unicode(v, 'utf-8') if isinstance(v, str) else v)
for k, v in kwargs.iteritems())
return str.__new__(Utf8, unicode(self, 'utf-8').
format(*args, **kwargs).encode('utf-8'))
def __mod__(self, right):
if isinstance(right, tuple):
right = tuple(unicode(v, 'utf-8') if isinstance(v, str) else v
for v in right)
elif isinstance(right, dict):
right = dict((unicode(k, 'utf-8') if isinstance(k, str) else k,
unicode(v, 'utf-8') if isinstance(v, str) else v)
for k, v in right.iteritems())
elif isinstance(right, str):
right = unicode(right, 'utf-8')
return str.__new__(Utf8, unicode(self, 'utf-8').__mod__(right).encode('utf-8'))
def __ge__(self, string):
return sort_key(self) >= sort_key(string)
def __gt__(self, string):
return sort_key(self) > sort_key(string)
def __le__(self, string):
return sort_key(self) <= sort_key(string)
def __lt__(self, string):
return sort_key(self) < sort_key(string)
if __name__ == '__main__':
def doctests():
u"""
doctests:
>>> test_unicode=u'ПРоба Є PRobe'
>>> test_unicode_word=u'ПРоба'
>>> test_number_str='12345'
>>> test_unicode
u'\\u041f\\u0420\\u043e\\u0431\\u0430 \\u0404 PRobe'
>>> print test_unicode
ПРоба Є PRobe
>>> test_word=test_unicode_word.encode('utf-8')
>>> test_str=test_unicode.encode('utf-8')
>>> s=Utf8(test_str)
>>> s
'ПРоба Є PRobe'
>>> type(s)
<class '__main__.Utf8'>
>>> s == test_str
True
>>> len(test_str) # wrong length of utf8-string!
19
>>> len(test_unicode) # RIGHT!
13
>>> len(s) # RIGHT!
13
>>> size(test_str) # size of utf-8 string (in bytes) == len(str)
19
>>> size(test_unicode) # size of unicode string in bytes (packed to utf-8 string)
19
>>> size(s) # size of utf-8 string in bytes
19
>>> try: # utf-8 is a multibyte string. Convert it to unicode for use with builtin ord()
... __builtin__.ord('б') # ascii string
... except Exception, e:
... print 'Exception:', e
Exception: ord() expected a character, but string of length 2 found
>>> ord('б') # utf8.ord() is used(!!!)
1073
>>> ord(u'б') # utf8.ord() is used(!!!)
1073
>>> ord(s[3]) # utf8.ord() is used(!!!)
1073
>>> chr(ord(s[3])) # utf8.chr() and utf8.chr() is used(!!!)
'б'
>>> type(chr(1073)) # utf8.chr() is used(!!!)
<class '__main__.Utf8'>
>>> s=Utf8(test_unicode)
>>> s
'ПРоба Є PRobe'
>>> s == test_str
True
>>> test_str == s
True
>>> s == test_unicode
True
>>> test_unicode == s
True
>>> print test_str.upper() # only ASCII characters uppered
ПРоба Є PROBE
>>> print test_unicode.upper() # unicode gives right result
ПРОБА Є PROBE
>>> s.upper() # utf8 class use unicode.upper()
'ПРОБА Є PROBE'
>>> type(s.upper())
<class '__main__.Utf8'>
>>> s.lower()
'проба є probe'
>>> type(s.lower())
<class '__main__.Utf8'>
>>> s.capitalize()
'Проба є probe'
>>> type(s.capitalize())
<class '__main__.Utf8'>
>>> len(s)
13
>>> len(test_unicode)
13
>>> s+'. Probe is проба'
'ПРоба Є PRobe. Probe is проба'
>>> type(s+'. Probe is проба')
<class '__main__.Utf8'>
>>> s+u'. Probe is проба'
'ПРоба Є PRobe. Probe is проба'
>>> type(s+u'. Probe is проба')
<class '__main__.Utf8'>
>>> s+s
'ПРоба Є PRobeПРоба Є PRobe'
>>> type(s+s)
<class '__main__.Utf8'>
>>> a=s
>>> a+=s
>>> a+=test_unicode
>>> a+=test_str
>>> a
'ПРоба Є PRobeПРоба Є PRobeПРоба Є PRobeПРоба Є PRobe'
>>> type(a)
<class '__main__.Utf8'>
>>> s*3
'ПРоба Є PRobeПРоба Є PRobeПРоба Є PRobe'
>>> type(s*3)
<class '__main__.Utf8'>
>>> a=Utf8("-проба-")
>>> a*=10
>>> a
'-проба--проба--проба--проба--проба--проба--проба--проба--проба--проба-'
>>> type(a)
<class '__main__.Utf8'>
>>> print "'"+test_str.center(17)+"'" # WRONG RESULT!
'ПРоба Є PRobe'
>>> s.center(17) # RIGHT!
' ПРоба Є PRobe '
>>> type(s.center(17))
<class '__main__.Utf8'>
>>> (test_word+test_number_str).isalnum() # WRONG RESULT! non ASCII chars are detected as non alpha
False
>>> Utf8(test_word+test_number_str).isalnum()
True
>>> s.isalnum()
False
>>> test_word.isalpha() # WRONG RESULT! Non ASCII characters are detected as non alpha
False
>>> Utf8(test_word).isalpha() # RIGHT!
True
>>> s.lower().islower()
True
>>> s.upper().isupper()
True
>>> print test_str.zfill(17) # WRONG RESULT!
ПРоба Є PRobe
>>> s.zfill(17) # RIGHT!
'0000ПРоба Є PRobe'
>>> type(s.zfill(17))
<class '__main__.Utf8'>
>>> s.istitle()
False
>>> s.title().istitle()
True
>>> Utf8('1234').isdigit()
True
>>> Utf8(' \t').isspace()
True
>>> s.join('•|•')
'•ПРоба Є PRobe|ПРоба Є PRobe•'
>>> s.join((str('(utf8 тест1)'), unicode('(unicode тест2)','utf-8'), '(ascii test3)'))
'(utf8 тест1)ПРоба Є PRobe(unicode тест2)ПРоба Є PRobe(ascii test3)'
>>> type(s)
<class '__main__.Utf8'>
>>> s==test_str
True
>>> s==test_unicode
True
>>> s.swapcase()
'прОБА є prOBE'
>>> type(s.swapcase())
<class '__main__.Utf8'>
>>> truncate(s, 10)
'ПРоба Є...'
>>> truncate(s, 20)
'ПРоба Є PRobe'
>>> truncate(s, 10, '•••') # utf-8 string as *dots*
'ПРоба Є•••'
>>> truncate(s, 10, u'®') # you can use unicode string as *dots*
'ПРоба Є P®'
>>> type(truncate(s, 10))
<class '__main__.Utf8'>
>>> Utf8(s.encode('koi8-u'), 'koi8-u')
'ПРоба Є PRobe'
>>> s.decode() # convert utf-8 string to unicode
u'\\u041f\\u0420\\u043e\\u0431\\u0430 \\u0404 PRobe'
>>> a='про\\tba'
>>> str_tmp=a.expandtabs()
>>> utf8_tmp=Utf8(a).expandtabs()
>>> utf8_tmp.replace(' ','.') # RIGHT! (default tabsize is 8)
'про.....ba'
>>> utf8_tmp.index('b')
8
>>> print "'"+str_tmp.replace(' ','.')+"'" # WRONG STRING LENGTH!
'про..ba'
>>> str_tmp.index('b') # WRONG index of 'b' character
8
>>> print "'"+a.expandtabs(4).replace(' ','.')+"'" # WRONG RESULT!
'про..ba'
>>> Utf8(a).expandtabs(4).replace(' ','.') # RIGHT!
'про.ba'
>>> s.find('Є')
6
>>> s.find(u'Є')
6
>>> s.find(' ', 6)
7
>>> s.rfind(' ')
7
>>> s.partition('Є')
('ПРоба ', 'Є', ' PRobe')
>>> s.partition(u'Є')
('ПРоба ', 'Є', ' PRobe')
>>> (a,b,c) = s.partition('Є')
>>> type(a), type(b), type(c)
(<class '__main__.Utf8'>, <class '__main__.Utf8'>, <class '__main__.Utf8'>)
>>> s.partition(' ')
('ПРоба', ' ', 'Є PRobe')
>>> s.rpartition(' ')
('ПРоба Є', ' ', 'PRobe')
>>> s.index('Є')
6
>>> s.rindex(u'Є')
6
>>> s.index(' ')
5
>>> s.rindex(' ')
7
>>> a=Utf8('а б ц д е а б ц д е а\\tб ц д е')
>>> a.split()
['а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д',
'е', 'а', 'б', 'ц', 'д', 'е']
>>> a.rsplit()
['а', 'б', 'ц', 'д', 'е', 'а', 'б', 'ц', 'д',
'е', 'а', 'б', 'ц', 'д', 'е']
>>> a.expandtabs().split('б')
['а ', ' ц д е а ', ' ц д е а ', ' ц д е']
>>> a.expandtabs().rsplit('б')
['а ', ' ц д е а ', ' ц д е а ', ' ц д е']
>>> a.expandtabs().split(u'б', 1)
['а ', ' ц д е а б ц д е а б ц д е']
>>> a.expandtabs().rsplit(u'б', 1)
['а б ц д е а б ц д е а ', ' ц д е']
>>> a=Utf8("рядок1\\nрядок2\\nрядок3")
>>> a.splitlines()
['рядок1', 'рядок2', 'рядок3']
>>> a.splitlines(True)
['рядок1\\n', 'рядок2\\n', 'рядок3']
>>> s[6]
'Є'
>>> s[0]
'П'
>>> s[-1]
'e'
>>> s[:10]
'ПРоба Є PR'
>>> s[2:-2:2]
'оаЄPo'
>>> s[::-1]
'eboRP Є абоРП'
>>> s.startswith('ПР')
True
>>> s.startswith(('ПР', u'об'),0)
True
>>> s.startswith(u'об', 2, 4)
True
>>> s.endswith('be')
True
>>> s.endswith(('be', 'PR', u'Є'))
True
>>> s.endswith('PR', 8, 10)
True
>>> s.endswith('Є', -7, -6)
True
>>> s.count(' ')
2
>>> s.count(' ',6)
1
>>> s.count(u'Є')
1
>>> s.count('Є', 0, 5)
0
>>> Utf8(
"Parameters: '%(проба)s', %(probe)04d, %(проба2)s") % { u"проба": s,
... "not used": "???", "probe": 2, "проба2": u"ПРоба Probe" }
"Parameters: 'ПРоба Є PRobe', 0002, ПРоба Probe"
>>> a=Utf8(u"Параметр: (%s)-(%s)-[%s]")
>>> a%=(s, s[::-1], 1000)
>>> a
'Параметр: (ПРоба Є PRobe)-(eboRP Є абоРП)-[1000]'
>>> if hasattr(Utf8, 'format'):
... Utf8("Проба <{0}>, {1}, {param1}, {param2}").format(s, u"中文字",
... param1="барабан", param2=1000) == 'Проба <ПРоба Є PRobe>, 中文字, барабан, 1000'
... else: # format() method is not used in python with version <2.6:
... print True
True
>>> u'Б'<u'Ї' # WRONG ORDER!
False
>>> 'Б'<'Ї' # WRONG ORDER!
False
>>> Utf8('Б')<'Ї' # RIGHT!
True
>>> u'д'>u'ґ' # WRONG ORDER!
False
>>> Utf8('д')>Utf8('ґ') # RIGHT!
True
>>> u'є'<=u'ж' # WRONG ORDER!
False
>>> Utf8('є')<=u'ж' # RIGHT!
True
>>> Utf8('є')<=u'є'
True
>>> u'Ї'>=u'И' # WRONG ORDER!
False
>>> Utf8(u'Ї') >= u'И' # RIGHT
True
>>> Utf8('Є') >= 'Є'
True
>>> a="яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ" # str type
>>> b=u"яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ" # unicode type
>>> c=Utf8("яжертиуіопшщїасдфгґхйклчєзьцвбнмюЯЖЕРТИУІОПШЩЇАСДФГҐХЙКЛЧЗЬЦВБНМЮЄ") # utf8 class
>>> result = "".join(sorted(a))
>>> result[0:20] # result is not utf8 string, because bytes, not utf8-characters were sorted
'\\x80\\x81\\x82\\x83\\x84\\x84\\x85\\x86\\x86\\x87\\x87\\x88\\x89\\x8c\\x8e\\x8f\\x90\\x90\\x91\\x91'
>>> try:
... unicode(result, 'utf-8') # try to convert result (utf-8?) to unicode
... except Exception, e:
... print 'Exception:', e
Exception: 'utf8' codec can't decode byte 0x80 in position 0: unexpected code byte
>>> try: # FAILED! (working with bytes, not with utf8-charactes)
... "".join( sorted(a, key=sort_key) ) # utf8.sort_key may be used with utf8 or unicode strings only!
... except Exception, e:
... print 'Exception:', e
Exception: 'utf8' codec can't decode byte 0xd1 in position 0: unexpected end of data
>>> print "".join( sorted(Utf8(a))) # converting *a* to unicode or utf8-string gives us correct result
аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ
>>> print u"".join( sorted(b) ) # WRONG ORDER! Default sort key is used
ЄІЇАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЬЮЯабвгдежзийклмнопрстуфхцчшщьюяєіїҐґ
>>> print u"".join( sorted(b, key=sort_key) ) # RIGHT ORDER! utf8.sort_key is used
аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ
>>> print "".join( sorted(c) ) # RIGHT ORDER! Utf8 "rich comparison" methods are used
аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ
>>> print "".join( sorted(c, key=sort_key) ) # RIGHT ORDER! utf8.sort_key is used
аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ
>>> Utf8().join(sorted(c.decode(), key=sort_key)) # convert to unicode for better performance
'аАбБвВгГґҐдДеЕєЄжЖзЗиИіІїЇйЙкКлЛмМнНоОпПрРсСтТуУфФхХцЦчЧшШщЩьЬюЮяЯ'
>>> for result in sorted(
["Іа", "Астро", u"гала", Utf8("Гоша"), "Єва", "шовк", "аякс", "Їжа",
... "ґанок", Utf8("Дар'я"), "білінг", "веб", u"Жужа", "проба", u"тест",
... "абетка", "яблуко", "Юляся", "Київ", "лимонад", "ложка", "Матриця",
... ], key=sort_key):
... print result.ljust(20), type(result)
абетка <type 'str'>
Астро <type 'str'>
аякс <type 'str'>
білінг <type 'str'>
веб <type 'str'>
гала <type 'unicode'>
ґанок <type 'str'>
Гоша <class '__main__.Utf8'>
Дар'я <class '__main__.Utf8'>
Єва <type 'str'>
Жужа <type 'unicode'>
Іа <type 'str'>
Їжа <type 'str'>
Київ <type 'str'>
лимонад <type 'str'>
ложка <type 'str'>
Матриця <type 'str'>
проба <type 'str'>
тест <type 'unicode'>
шовк <type 'str'>
Юляся <type 'str'>
яблуко <type 'str'>
>>> a=Utf8("中文字")
>>> L=list(a)
>>> L
['中', '文', '字']
>>> a="".join(L)
>>> print a
中文字
>>> type(a)
<type 'str'>
>>> a="中文字" # standard str type
>>> L=list(a)
>>> L
['\\xe4', '\\xb8', '\\xad', '\\xe6', '\\x96', '\\x87',
'\\xe5', '\\xad', '\\x97']
>>> from string import maketrans
>>> str_tab=maketrans('PRobe','12345')
>>> unicode_tab={ord(u'П'):ord(u'Ж'),
... ord(u'Р') : u'Ш',
... ord(Utf8('о')) : None, # utf8.ord() is used
... ord('б') : None, # -//-//-
... ord(u'а') : u"中文字",
... ord(u'Є') : Utf8('•').decode(), # only unicode type is supported
... }
>>> s.translate(unicode_tab).translate(str_tab, deletechars=' ')
'ЖШ中文字•12345'
"""
import sys
reload(sys)
sys.setdefaultencoding("UTF-8")
import doctest
print "DOCTESTS STARTED..."
doctest.testmod()
print "DOCTESTS FINISHED"
doctests()
| bsd-3-clause |
Cactuslegs/audacity-of-nope | lib-src/lv2/lv2/plugins/eg02-midigate.lv2/waflib/Tools/cs.py | 133 | 4142 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Utils,Task,Options,Logs,Errors
from waflib.TaskGen import before_method,after_method,feature
from waflib.Tools import ccroot
from waflib.Configure import conf
import os,tempfile
ccroot.USELIB_VARS['cs']=set(['CSFLAGS','ASSEMBLIES','RESOURCES'])
ccroot.lib_patterns['csshlib']=['%s']
@feature('cs')
@before_method('process_source')
def apply_cs(self):
cs_nodes=[]
no_nodes=[]
for x in self.to_nodes(self.source):
if x.name.endswith('.cs'):
cs_nodes.append(x)
else:
no_nodes.append(x)
self.source=no_nodes
bintype=getattr(self,'bintype',self.gen.endswith('.dll')and'library'or'exe')
self.cs_task=tsk=self.create_task('mcs',cs_nodes,self.path.find_or_declare(self.gen))
tsk.env.CSTYPE='/target:%s'%bintype
tsk.env.OUT='/out:%s'%tsk.outputs[0].abspath()
self.env.append_value('CSFLAGS','/platform:%s'%getattr(self,'platform','anycpu'))
inst_to=getattr(self,'install_path',bintype=='exe'and'${BINDIR}'or'${LIBDIR}')
if inst_to:
mod=getattr(self,'chmod',bintype=='exe'and Utils.O755 or Utils.O644)
self.install_task=self.bld.install_files(inst_to,self.cs_task.outputs[:],env=self.env,chmod=mod)
@feature('cs')
@after_method('apply_cs')
def use_cs(self):
names=self.to_list(getattr(self,'use',[]))
get=self.bld.get_tgen_by_name
for x in names:
try:
y=get(x)
except Errors.WafError:
self.env.append_value('CSFLAGS','/reference:%s'%x)
continue
y.post()
tsk=getattr(y,'cs_task',None)or getattr(y,'link_task',None)
if not tsk:
self.bld.fatal('cs task has no link task for use %r'%self)
self.cs_task.dep_nodes.extend(tsk.outputs)
self.cs_task.set_run_after(tsk)
self.env.append_value('CSFLAGS','/reference:%s'%tsk.outputs[0].abspath())
@feature('cs')
@after_method('apply_cs','use_cs')
def debug_cs(self):
csdebug=getattr(self,'csdebug',self.env.CSDEBUG)
if not csdebug:
return
node=self.cs_task.outputs[0]
if self.env.CS_NAME=='mono':
out=node.parent.find_or_declare(node.name+'.mdb')
else:
out=node.change_ext('.pdb')
self.cs_task.outputs.append(out)
try:
self.install_task.source.append(out)
except AttributeError:
pass
if csdebug=='pdbonly':
val=['/debug+','/debug:pdbonly']
elif csdebug=='full':
val=['/debug+','/debug:full']
else:
val=['/debug-']
self.env.append_value('CSFLAGS',val)
class mcs(Task.Task):
color='YELLOW'
run_str='${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'
def exec_command(self,cmd,**kw):
bld=self.generator.bld
try:
if not kw.get('cwd',None):
kw['cwd']=bld.cwd
except AttributeError:
bld.cwd=kw['cwd']=bld.variant_dir
try:
tmp=None
if isinstance(cmd,list)and len(' '.join(cmd))>=8192:
program=cmd[0]
cmd=[self.quote_response_command(x)for x in cmd]
(fd,tmp)=tempfile.mkstemp()
os.write(fd,'\r\n'.join(i.replace('\\','\\\\')for i in cmd[1:]))
os.close(fd)
cmd=[program,'@'+tmp]
ret=self.generator.bld.exec_command(cmd,**kw)
finally:
if tmp:
try:
os.remove(tmp)
except OSError:
pass
return ret
def quote_response_command(self,flag):
if flag.lower()=='/noconfig':
return''
if flag.find(' ')>-1:
for x in('/r:','/reference:','/resource:','/lib:','/out:'):
if flag.startswith(x):
flag='%s"%s"'%(x,flag[len(x):])
break
else:
flag='"%s"'%flag
return flag
def configure(conf):
csc=getattr(Options.options,'cscbinary',None)
if csc:
conf.env.MCS=csc
conf.find_program(['csc','mcs','gmcs'],var='MCS')
conf.env.ASS_ST='/r:%s'
conf.env.RES_ST='/resource:%s'
conf.env.CS_NAME='csc'
if str(conf.env.MCS).lower().find('mcs')>-1:
conf.env.CS_NAME='mono'
def options(opt):
opt.add_option('--with-csc-binary',type='string',dest='cscbinary')
class fake_csshlib(Task.Task):
color='YELLOW'
inst_to=None
def runnable_status(self):
for x in self.outputs:
x.sig=Utils.h_file(x.abspath())
return Task.SKIP_ME
@conf
def read_csshlib(self,name,paths=[]):
return self(name=name,features='fake_lib',lib_paths=paths,lib_type='csshlib')
| gpl-2.0 |
hakanozadam/bal | bal/umass_cluster/merge_bed_files.py | 1 | 2886 | #!/bin/env python3
# AUTHORS:
# Hakan Ozadam
#
# Moore Laboratory
# UMASS Medical School / HHMI
# RNA Therapeutics Institute
# Albert Sherman Center, ASC4-1009
# 368 Plantation Street
# Worcester, MA 01605
# USA
#
####################################################################
import argparse
import os
import operator
####################################################################
####################################################################
def merge_bed_files(input_list, output_bed):
output_bed_entries = dict()
for file in input_list:
with open(file, 'r') as input_stream:
for line in input_stream:
contents = line.strip().split()
contents[4] = int(contents[4])
print(contents)
if output_bed_entries.get(contents[3]):
output_bed_entries[contents[3]][4] = \
output_bed_entries[contents[3]][4] +\
contents[4]
else:
output_bed_entries[contents[3]] = contents
sorted_entries = sorted(output_bed_entries, key = operator.itemgetter(0))
print('---sorted_entries---\n', sorted_entries)
print('---output_entries---\n', output_bed_entries)
with open(output_bed, 'w') as output_stream:
for entry in sorted_entries:
output_bed_entries[entry][4] = str(output_bed_entries[entry][4])
print("\t".join(output_bed_entries[entry]) , file = output_stream)
####################################################################
def main():
parser = argparse.ArgumentParser(description=
'''
Merge the bed files given as a list in the input file and write the result
to the output given in the -o argument. The input list should be one file per line.
''')
parser.add_argument("-i" ,
help = "Input bed file list One file per line." ,
required = True ,
metavar = "input_file_list" ,
type = str)
parser.add_argument("-o" ,
help = "Merged bed file" ,
required = True ,
metavar = "output_file" ,
type = str)
arguments = parser.parse_args()
input_file_list = list()
with open(arguments.i, 'r') as input_list_stream:
for line in input_list_stream:
file_path = line.strip()
if not os.path.isfile(file_path):
raise(FileNotFoundError("The bed file\n", file_path,\
"doesn't exist!") )
input_file_list.append(file_path)
merge_bed_files(input_file_list, arguments.o)
#####################################################################
if __name__ == '__main__':
main() | gpl-2.0 |
slightperturbation/Cobalt | ext/emsdk_portable/emscripten/1.27.0/tools/settings_template_readonly.py | 1 | 2171 | # This file will be edited (the {{{ }}} things), and then ~/.emscripten created with the result, if ~/.emscripten doesn't exist.
# Note: If you put paths relative to the home directory, do not forget os.path.expanduser
import os
# this helps projects using emscripten find it
EMSCRIPTEN_ROOT = os.path.expanduser(os.getenv('EMSCRIPTEN') or '{{{ EMSCRIPTEN_ROOT }}}') # directory
LLVM_ROOT = os.path.expanduser(os.getenv('LLVM') or '{{{ LLVM_ROOT }}}') # directory
# If not specified, defaults to sys.executable.
#PYTHON = 'python'
# See below for notes on which JS engine(s) you need
NODE_JS = os.path.expanduser(os.getenv('NODE') or '{{{ NODE }}}') # executable
SPIDERMONKEY_ENGINE = [os.path.expanduser(os.getenv('SPIDERMONKEY') or 'js')] # executable
V8_ENGINE = os.path.expanduser(os.getenv('V8') or 'd8') # executable
JAVA = 'java' # executable
TEMP_DIR = '{{{ TEMP }}}'
CRUNCH = os.path.expanduser(os.getenv('CRUNCH') or 'crunch') # executable
#CLOSURE_COMPILER = '..' # define this to not use the bundled version
########################################################################################################
# Pick the JS engine to use for running the compiler. This engine must exist, or
# nothing can be compiled.
#
# Recommendation: If you already have node installed, use that. Otherwise, build v8 or
# spidermonkey from source. Any of these three is fine, as long as it's
# a recent version (especially for v8 and spidermonkey).
COMPILER_ENGINE = NODE_JS
#COMPILER_ENGINE = V8_ENGINE
#COMPILER_ENGINE = SPIDERMONKEY_ENGINE
# All JS engines to use when running the automatic tests. Not all the engines in this list
# must exist (if they don't, they will be skipped in the test runner).
#
# Recommendation: If you already have node installed, use that. If you can, also build
# spidermonkey from source as well to get more test coverage (node can't
# run all the tests due to node issue 1669). v8 is currently not recommended
# here because of v8 issue 1822.
JS_ENGINES = [NODE_JS] # add this if you have spidermonkey installed too, SPIDERMONKEY_ENGINE]
| apache-2.0 |
kevinr/750book-web | 750book-web-env/lib/python2.7/site-packages/celery/tests/test_concurrency/test_pool.py | 14 | 2446 | from __future__ import absolute_import
import sys
import time
import logging
import itertools
from nose import SkipTest
from celery.datastructures import ExceptionInfo
from celery.tests.utils import Case
def do_something(i):
return i * i
def long_something():
time.sleep(1)
def raise_something(i):
try:
raise KeyError("FOO EXCEPTION")
except KeyError:
return ExceptionInfo(sys.exc_info())
class TestTaskPool(Case):
def setUp(self):
try:
__import__("multiprocessing")
except ImportError:
raise SkipTest("multiprocessing not supported")
from celery.concurrency.processes import TaskPool
self.TaskPool = TaskPool
def test_attrs(self):
p = self.TaskPool(2)
self.assertEqual(p.limit, 2)
self.assertIsInstance(p.logger, logging.Logger)
self.assertIsNone(p._pool)
def x_apply(self):
p = self.TaskPool(2)
p.start()
scratchpad = {}
proc_counter = itertools.count().next
def mycallback(ret_value):
process = proc_counter()
scratchpad[process] = {}
scratchpad[process]["ret_value"] = ret_value
myerrback = mycallback
res = p.apply_async(do_something, args=[10], callback=mycallback)
res2 = p.apply_async(raise_something, args=[10], errback=myerrback)
res3 = p.apply_async(do_something, args=[20], callback=mycallback)
self.assertEqual(res.get(), 100)
time.sleep(0.5)
self.assertDictContainsSubset({"ret_value": 100},
scratchpad.get(0))
self.assertIsInstance(res2.get(), ExceptionInfo)
self.assertTrue(scratchpad.get(1))
time.sleep(1)
self.assertIsInstance(scratchpad[1]["ret_value"],
ExceptionInfo)
self.assertEqual(scratchpad[1]["ret_value"].exception.args,
("FOO EXCEPTION", ))
self.assertEqual(res3.get(), 400)
time.sleep(0.5)
self.assertDictContainsSubset({"ret_value": 400},
scratchpad.get(2))
res3 = p.apply_async(do_something, args=[30], callback=mycallback)
self.assertEqual(res3.get(), 900)
time.sleep(0.5)
self.assertDictContainsSubset({"ret_value": 900},
scratchpad.get(3))
p.stop()
| mit |
tralamazza/micropython | tests/float/float2int_fp30_intbig.py | 30 | 2629 | # check cases converting float to int, relying only on single precision float
try:
import ustruct as struct
except:
import struct
import sys
maxsize_bits = 0
maxsize = sys.maxsize
while maxsize:
maxsize >>= 1
maxsize_bits += 1
# work out configuration values
is_64bit = maxsize_bits > 32
# 0 = none, 1 = long long, 2 = mpz
ll_type = None
if is_64bit:
if maxsize_bits < 63:
ll_type = 0
else:
if maxsize_bits < 31:
ll_type = 0
if ll_type is None:
one = 1
if one << 65 < one << 62:
ll_type = 1
else:
ll_type = 2
# basic conversion
print(int(14187744.))
print("%d" % 14187744.)
if ll_type == 2:
print(int(2.**100))
print("%d" % 2.**100)
testpass = True
p2_rng = ((30,63,127),(62,63,127))[is_64bit][ll_type]
for i in range(0,p2_rng):
bitcnt = len(bin(int(2.**i))) - 3;
if i != bitcnt:
print('fail: 2.**%u was %u bits long' % (i, bitcnt));
testpass = False
print("power of 2 test: %s" % (testpass and 'passed' or 'failed'))
# TODO why does 10**12 fail this test for single precision float?
testpass = True
p10_rng = 9
for i in range(0,p10_rng):
digcnt = len(str(int(10.**i))) - 1;
if i != digcnt:
print('fail: 10.**%u was %u digits long' % (i, digcnt));
testpass = False
print("power of 10 test: %s" % (testpass and 'passed' or 'failed'))
def fp2int_test(num, name, should_fail):
try:
x = int(num)
passed = ~should_fail
except:
passed = should_fail
print('%s: %s' % (name, passed and 'passed' or 'failed'))
if ll_type != 2:
if ll_type == 0:
if is_64bit:
neg_bad_fp = -1.00000005*2.**62.
pos_bad_fp = 2.**62.
neg_good_fp = -2.**62.
pos_good_fp = 0.99999993*2.**62.
else:
neg_bad_fp = -1.00000005*2.**30.
pos_bad_fp = 2.**30.
neg_good_fp = -2.**30.
pos_good_fp = 0.9999999499*2.**30.
else:
neg_bad_fp = -0.51*2.**64.
pos_bad_fp = 2.**63.
neg_good_fp = -2.**63.
pos_good_fp = 1.9999998*2.**62.
fp2int_test(neg_bad_fp, 'neg bad', True)
fp2int_test(pos_bad_fp, 'pos bad', True)
fp2int_test(neg_good_fp, 'neg good', False)
fp2int_test(pos_good_fp, 'pos good', False)
else:
fp2int_test(-1.999999879*2.**126., 'large neg', False)
fp2int_test(1.999999879*2.**126., 'large pos', False)
fp2int_test(float('inf'), 'inf test', True)
fp2int_test(float('nan'), 'NaN test', True)
# test numbers < 1 (this used to fail; see issue #1044)
fp2int_test(0.0001, 'small num', False)
struct.pack('I', int(1/2))
| mit |