repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
foss-transportationmodeling/rettina-server | flask/lib/python2.7/site-packages/flask/__init__.py | 425 | 1674 | # -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.10.1'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as part of the public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .module import Module
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
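# A minimal usage sketch of the json helpers exported above (illustrative
# only; the app name and route are hypothetical):
#
#     app = Flask(__name__)
#
#     @app.route('/ping')
#     def ping():
#         return jsonify(status='ok')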
| apache-2.0 |
Jeff-Wang93/vent | vent/menus/tutorials.py | 1 | 2688 | import npyscreen
class TutorialForm(npyscreen.ActionFormWithMenus):
""" Tutorial form for the Vent CLI """
def __init__(self, title='', text='', next_tutorial='', *args, **keywords):
""" Initialize tutorial form fields """
self.title = title
self.text = text
self.next_tutorial = next_tutorial
super(TutorialForm, self).__init__(*args, **keywords)
def switch(self, name):
""" Wrapper that switches to provided form """
self.parentApp.change_form(name)
def quit(self, *args, **kwargs):
""" Overridden to switch back to MAIN form """
self.parentApp.switchForm('MAIN')
def create(self):
""" Overridden to add handlers and content """
self.add_handlers({'^Q': self.quit})
self.add(npyscreen.TitleText, name=self.title, editable=False)
self.add(npyscreen.MultiLineEdit, editable=False, value=self.text,
max_width=75, slow_scroll=True)
self.m2 = self.add_menu(name='About Vent', shortcut='v')
self.m2.addItem(text='Background', onSelect=self.switch,
arguments=['TUTORIALBACKGROUND'], shortcut='b')
self.m2.addItem(text='Terminology', onSelect=self.switch,
arguments=['TUTORIALTERMINOLOGY'], shortcut='t')
self.m2.addItem(text='Getting Setup', onSelect=self.switch,
arguments=['TUTORIALGETTINGSETUP'], shortcut='s')
self.m3 = self.add_menu(name='Working with Cores', shortcut='c')
self.m3.addItem(text='Building Cores', onSelect=self.switch,
arguments=['TUTORIALBUILDINGCORES'], shortcut='b')
self.m3.addItem(text='Starting Cores', onSelect=self.switch,
arguments=['TUTORIALSTARTINGCORES'], shortcut='c')
self.m4 = self.add_menu(name='Working with Plugins', shortcut='p')
self.m4.addItem(text='Adding Plugins', onSelect=self.switch,
arguments=['TUTORIALADDINGPLUGINS'], shortcut='a')
self.m5 = self.add_menu(name='Files', shortcut='f')
self.m5.addItem(text='Adding Files', onSelect=self.switch,
arguments=['TUTORIALADDINGFILES'], shortcut='a')
self.m6 = self.add_menu(name='Help', shortcut='s')
self.m6.addItem(text='Basic Troubleshooting', onSelect=self.switch,
arguments=['TUTORIALTROUBLESHOOTING'], shortcut='t')
def on_cancel(self):
""" When user clicks cancel, will return to MAIN """
self.quit()
def on_ok(self):
""" When user clicks ok, will proceed to next tutorial """
self.switch(self.next_tutorial)
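# A minimal sketch of registering this form with an npyscreen application
# (the form ids, title, and text below are hypothetical; the parent app is
# assumed to provide the change_form() helper used by switch()):
#
#     class TutorialApp(npyscreen.NPSAppManaged):
#         def onStart(self):
#             self.addForm('MAIN', TutorialForm, title='Welcome to Vent',
#                          text='Use the menus to explore the tutorials.',
#                          next_tutorial='TUTORIALBACKGROUND')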
| apache-2.0 |
ARL-UTEP-OC/emubox | workshop-creator/python27-64bit-gtk3/Lib/site-packages/gi/overrides/Dee.py | 1 | 6733 | from gi.overrides import override
from gi.importer import modules
Dee = modules['Dee']._introspection_module
from gi.repository import GLib
__all__ = []
class RowWrapper:
def __init__ (self, model, itr):
self.model = model
self.itr = itr
self.__initialized = True
def __getitem__ (self, column):
return self.model.get_value(self.itr, column)
def __setitem__ (self, column, val):
self.model.set_value (self.itr, column, val)
def __getattr__ (self, name):
col_index = self.model.get_column_index (name)
if col_index < 0:
raise AttributeError("object has no attribute '%s'" % name)
return self.model.get_value (self.itr, col_index)
def __setattr__ (self, name, value):
if not "_RowWrapper__initialized" in self.__dict__:
self.__dict__[name] = value
return
col_index = self.model.get_column_index (name)
if col_index < 0:
raise AttributeError("object has no attribute '%s'" % name)
self.model.set_value (self.itr, col_index, value)
def __iter__ (self):
for column in range(self.model.get_n_columns()):
yield self.model.get_value (self.itr, column)
def __len__ (self):
return self.model.get_n_columns()
def __str__ (self):
return "(%s)" % ", ".join(map(str,self))
def __eq__ (self, other):
if not isinstance (other, RowWrapper):
return False
if self.model != other.model:
return False
return self.itr == other.itr
class Model(Dee.Model):
def __init__(self):
Dee.Model.__init__(self)
def set_schema (self, *args):
self.set_schema_full (tuple(args))
def set_column_names (self, *args):
self.set_column_names_full (tuple(args))
def _build_row (self, args, kwargs):
schema = self.get_schema()
result = [None] * len(schema)
if len(args) > 0:
for i, arg in enumerate(args):
if isinstance(arg, GLib.Variant):
result[i] = arg
else:
result[i] = GLib.Variant(schema[i], arg)
# check
if result.count(None) > 0:
raise RuntimeError("Not all columns were set")
else:
names = self.get_column_names()
dicts = [None] * len(schema)
if len(names) == 0:
raise RuntimeError("Column names were not set")
for col_name, arg in kwargs.items():
if names.count(col_name) > 0:
col_index = names.index(col_name)
variant = arg if isinstance(arg, GLib.Variant) else GLib.Variant(schema[col_index], arg)
result[col_index] = variant
else:
col_schema, col_index = self.get_field_schema(col_name)
if col_schema:
variant = arg if isinstance(arg, GLib.Variant) else GLib.Variant(col_schema, arg)
colon_index = col_name.find("::")
field_name = col_name if colon_index < 0 else col_name[colon_index+2:]
if dicts[col_index] is None: dicts[col_index] = {}
dicts[col_index][field_name] = variant
else:
raise RuntimeError("Unknown column name: %s" % col_name)
# finish vardict creation
for index, d in enumerate(dicts):
if d: result[index] = GLib.Variant(schema[index], d)
# handle empty dicts (no "xrange" in python3)
for i in range(len(schema)):
if result[i] is None and schema[i] == "a{sv}":
result[i] = GLib.Variant(schema[i], {})
# checks
num_unset = result.count(None)
if num_unset > 0:
col_name = names[result.index(None)]
raise RuntimeError("Column '%s' was not set" % col_name)
return result
def prepend (self, *args, **kwargs):
return self.prepend_row (self._build_row(args, kwargs))
def append (self, *args, **kwargs):
return self.append_row (self._build_row(args, kwargs))
def insert (self, pos, *args, **kwargs):
return self.insert_row (pos, self._build_row(args, kwargs))
def insert_before (self, iter, *args, **kwargs):
return self.insert_row_before (iter, self._build_row(args, kwargs))
def insert_row_sorted (self, row_spec, sort_func, data):
return self.insert_row_sorted_with_sizes (row_spec, sort_func, data)
def insert_sorted (self, sort_func, *args, **kwargs):
return self.insert_row_sorted (self._build_row(args, kwargs), sort_func, None)
def find_row_sorted (self, row_spec, sort_func, data):
return self.find_row_sorted_with_sizes (row_spec, sort_func, data)
def find_sorted (self, sort_func, *args, **kwargs):
return self.find_row_sorted (self._build_row(args, kwargs), sort_func, None)
def get_schema (self):
return Dee.Model.get_schema(self)
def get_value (self, itr, column):
return Dee.Model.get_value (self, itr, column).unpack()
def set_value (self, itr, column, value):
var = GLib.Variant (self.get_column_schema(column), value)
if isinstance (itr, int):
itr = self.get_iter_at_row(itr)
Dee.Model.set_value (self, itr, column, var)
def __getitem__ (self, itr):
if isinstance (itr, int):
itr = self.get_iter_at_row(itr)
return RowWrapper(self, itr)
def __setitem__ (self, itr, row):
max_col = self.get_n_columns ()
for column, value in enumerate (row):
if column >= max_col:
raise IndexError("Too many columns in row assignment: %s" % column)
self.set_value (itr, column, value)
def get_row (self, itr):
return self[itr]
def __iter__ (self):
itr = self.get_first_iter ()
last = self.get_last_iter ()
while itr != last:
yield self.get_row(itr)
itr = self.next(itr)
return
def __len__ (self):
return self.get_n_rows()
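# A minimal usage sketch of the overridden Model (illustrative only; it
# assumes the Dee typelib is available through gi.repository):
#
#     from gi.repository import Dee
#
#     model = Dee.Model()
#     model.set_schema("s", "i")
#     model.set_column_names("uri", "count")
#     model.append("file:///tmp/a", 1)            # positional columns
#     model.append(uri="file:///tmp/b", count=2)  # by column name
#     for row in model:
#         print(row.uri, row.count)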
class ModelIter(Dee.ModelIter):
def __init__(self):
Dee.ModelIter.__init__(self)
def __eq__ (self, other):
if not isinstance (other, ModelIter):
return False
return repr(self) == repr(other)
Model = override(Model)
__all__.append('Model')
ModelIter = override(ModelIter)
__all__.append('ModelIter')
| gpl-2.0 |
sergiomb2/gdesklets | Controls/Sensor/__init__.py | 2 | 2382 | from factory.SensorFactory import SensorFactory
from main.Control import Control
from ISensor import ISensor
import gtk
#
# This control wraps legacy sensors so that they remain usable.
# Sensors are deprecated and should not be used in new code. This control
# exists solely to retain backwards compatibility.
#
class Sensor(Control, ISensor):
def __init__(self):
self.__sensor = None
self.__sensor_factory = SensorFactory()
self.__output = None
self.__menu = None
Control.__init__(self)
#
# Loads the given sensor with arguments.
#
def __set_sensor(self, value):
module, args = value
sensor = self.__sensor_factory.create_sensor(module, args)
if (sensor):
sensor.add_observer(self.__on_observe_sensor)
else:
raise RuntimeError("Could not load sensor")
self.__sensor = sensor
#
# Sends an action to the sensor.
#
def __set_action(self, value):
callname, path, allargs = value
self.__sensor.send_action(callname, path, allargs)
def __set_config_id(self, value): self.__sensor.set_config_id(value)
def __set_stop(self, value): self.__sensor.stop()
def __get_output(self): return self.__output
def __get_menu(self): return self.__menu
def __get_configurator(self): return self.__sensor.get_configurator()
#
# Observer for the sensor.
#
def __on_observe_sensor(self, src, cmd, data):
# propagate the incoming sensor output
if (cmd == src.OBS_OUTPUT):
self.__output = data
self._update("output")
elif (cmd == src.OBS_CMD_MENU):
self.__menu = data
self._update("menu")
sensor = property(None, __set_sensor, doc = "the sensor")
action = property(None, __set_action, doc = "the action to perform")
config_id = property(None, __set_config_id, doc = "the config ID")
stop = property(None, __set_stop, doc = "stops the sensor")
output = property(__get_output, None,
doc = "the output data of the sensor")
menu = property(__get_menu, None,
doc = "the menu data of the sensor")
configurator = property(__get_configurator, None,
doc = "the configurator of the sensor")
def get_class(): return Sensor
| gpl-2.0 |
Dark-Hacker/horizon | openstack_dashboard/dashboards/project/overview/views.py | 45 | 2065 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template.defaultfilters import capfirst # noqa
from django.template.defaultfilters import floatformat # noqa
from django.utils.translation import ugettext_lazy as _
from horizon.utils import csvbase
from horizon import views
from openstack_dashboard import usage
class ProjectUsageCsvRenderer(csvbase.BaseCsvResponse):
columns = [_("Instance Name"), _("VCPUs"), _("RAM (MB)"),
_("Disk (GB)"), _("Usage (Hours)"),
_("Time since created (Seconds)"), _("State")]
def get_row_data(self):
for inst in self.context['usage'].get_instances():
yield (inst['name'],
inst['vcpus'],
inst['memory_mb'],
inst['local_gb'],
floatformat(inst['hours'], 2),
inst['uptime'],
capfirst(inst['state']))
class ProjectOverview(usage.UsageView):
table_class = usage.ProjectUsageTable
usage_class = usage.ProjectUsage
template_name = 'project/overview/usage.html'
csv_response_class = ProjectUsageCsvRenderer
def get_data(self):
super(ProjectOverview, self).get_data()
return self.usage.get_instances()
class WarningView(views.HorizonTemplateView):
template_name = "project/_warning.html"
| apache-2.0 |
annarev/tensorflow | tensorflow/python/data/experimental/kernel_tests/optimization/map_parallelization_test.py | 9 | 6324 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapParallelization` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _test_combinations():
def assert_greater(x):
assert_op = control_flow_ops.Assert(math_ops.greater(x, -1), [x])
with ops.control_dependencies([assert_op]):
return x
cases = [
("Identity", lambda x: x, True),
("Increment", lambda x: x + 1, True),
("AssertGreater", assert_greater, True),
]
def reduce_fn(x, y):
name, function, should_optimize = y
return x + combinations.combine(
function=combinations.NamedObject(name, function),
should_optimize=should_optimize)
return functools.reduce(reduce_fn, cases, [])
class MapParallelizationTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_combinations()))
def testMapParallelization(self, function, should_optimize):
next_nodes = ["ParallelMap"] if should_optimize else ["Map"]
dataset = dataset_ops.Dataset.range(5).apply(
testing.assert_next(next_nodes)).map(function)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_parallelization = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset, expected_output=[function(x) for x in range(5)])
@combinations.generate(test_base.default_test_combinations())
def testCapturedConstant(self):
captured_t = constant_op.constant(42, dtype=dtypes.int64)
def fn(x):
return x + captured_t
dataset = dataset_ops.Dataset.range(5).apply(
testing.assert_next(["ParallelMap"])).map(fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_parallelization = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset, expected_output=[x + 42 for x in range(5)])
@combinations.generate(test_base.default_test_combinations())
def testCapturedVariable(self):
captured_t = variables.Variable(42, dtype=dtypes.int64)
def fn(x):
return x + captured_t
dataset = dataset_ops.Dataset.range(5).apply(
testing.assert_next(["Map"])).map(fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_parallelization = True
dataset = dataset.with_options(options)
self.evaluate(variables.global_variables_initializer())
self.assertDatasetProduces(
dataset,
expected_output=[x + 42 for x in range(5)],
requires_initialization=True)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(apply_autotune=[None, True, False])))
def testAutotuneOption(self, apply_autotune):
next_nodes = ["ParallelMap"] if (apply_autotune is not False) else ["Map"] # pylint: disable=g-bool-id-comparison
dataset = dataset_ops.Dataset.range(4).apply(
testing.assert_next(next_nodes)).map(lambda x: x + 2)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_parallelization = True
if apply_autotune is not None:
options.experimental_optimization.autotune = apply_autotune
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[2, 3, 4, 5])
@combinations.generate(test_base.default_test_combinations())
def testNoParallelizationInsideInterleave(self):
def func(i):
ds = dataset_ops.Dataset.range(i).apply(testing.assert_next(
["Map"])).map(lambda x: x + 1)
return ds
dataset = dataset_ops.Dataset.range(1, 4).interleave(
map_func=func, cycle_length=2, block_length=2)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_parallelization = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[1, 1, 2, 1, 2, 3])
@combinations.generate(test_base.default_test_combinations())
def testNoParallelizationInsideFlatMap(self):
def func(i):
ds = dataset_ops.Dataset.range(i).apply(testing.assert_next(
["Map"])).map(lambda x: x + 1)
return ds
dataset = dataset_ops.Dataset.range(1, 4).flat_map(map_func=func)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_parallelization = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[1, 1, 2, 1, 2, 3])
if __name__ == "__main__":
test.main()
| apache-2.0 |
elijah513/django | tests/get_object_or_404/tests.py | 296 | 3681 | from __future__ import unicode_literals
from django.http import Http404
from django.shortcuts import get_list_or_404, get_object_or_404
from django.test import TestCase
from .models import Article, Author
class GetObjectOr404Tests(TestCase):
def test_get_object_or_404(self):
a1 = Author.objects.create(name="Brave Sir Robin")
a2 = Author.objects.create(name="Patsy")
# No Articles yet, so we should get a Http404 error.
self.assertRaises(Http404, get_object_or_404, Article, title="Foo")
article = Article.objects.create(title="Run away!")
article.authors = [a1, a2]
# get_object_or_404 can be passed a Model to query.
self.assertEqual(
get_object_or_404(Article, title__contains="Run"),
article
)
# We can also use the Article manager through an Author object.
self.assertEqual(
get_object_or_404(a1.article_set, title__contains="Run"),
article
)
# No articles containing "Camelot". This should raise a Http404 error.
self.assertRaises(
Http404,
get_object_or_404, a1.article_set, title__contains="Camelot"
)
# Custom managers can be used too.
self.assertEqual(
get_object_or_404(Article.by_a_sir, title="Run away!"),
article
)
# QuerySets can be used too.
self.assertEqual(
get_object_or_404(Article.objects.all(), title__contains="Run"),
article
)
# Just as when using a get() lookup, you will get an error if more than
# one object is returned.
self.assertRaises(
Author.MultipleObjectsReturned,
get_object_or_404, Author.objects.all()
)
# Using an empty QuerySet raises a Http404 error.
self.assertRaises(
Http404,
get_object_or_404, Article.objects.none(), title__contains="Run"
)
# get_list_or_404 can be used to get lists of objects
self.assertEqual(
get_list_or_404(a1.article_set, title__icontains="Run"),
[article]
)
# Http404 is raised if the list is empty.
self.assertRaises(
Http404,
get_list_or_404, a1.article_set, title__icontains="Shrubbery"
)
# Custom managers can be used too.
self.assertEqual(
get_list_or_404(Article.by_a_sir, title__icontains="Run"),
[article]
)
# QuerySets can be used too.
self.assertEqual(
get_list_or_404(Article.objects.all(), title__icontains="Run"),
[article]
)
def test_bad_class(self):
# Given an argument klass that is not a Model, Manager, or Queryset
# raises a helpful ValueError message
self.assertRaisesMessage(
ValueError,
"Object is of type 'str', but must be a Django Model, Manager, "
"or QuerySet",
get_object_or_404, str("Article"), title__icontains="Run"
)
class CustomClass(object):
pass
self.assertRaisesMessage(
ValueError,
"Object is of type 'CustomClass', but must be a Django Model, "
"Manager, or QuerySet",
get_object_or_404, CustomClass, title__icontains="Run"
)
# Works for lists too
self.assertRaisesMessage(
ValueError,
"Object is of type 'list', but must be a Django Model, Manager, "
"or QuerySet",
get_list_or_404, [Article], title__icontains="Run"
)
| bsd-3-clause |
Zhongqilong/mykbengineer | kbe/src/lib/python/Lib/xml/etree/ElementInclude.py | 128 | 5151 | #
# ElementTree
# $Id: ElementInclude.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
##
# Limited XInclude support for the ElementTree package.
##
import copy
from . import ElementTree
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
pass
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding (UTF-8 by default for "text").
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an OSError exception.
# @throws OSError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
if parse == "xml":
with open(href, 'rb') as file:
data = ElementTree.parse(file).getroot()
else:
if not encoding:
encoding = 'UTF-8'
with open(href, 'r', encoding=encoding) as file:
data = file.read()
return data
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws OSError If the function fails to load a given resource.
def include(elem, loader=None):
if loader is None:
loader = default_loader
# look for xinclude elements
i = 0
while i < len(elem):
e = elem[i]
if e.tag == XINCLUDE_INCLUDE:
# process xinclude directive
href = e.get("href")
parse = e.get("parse", "xml")
if parse == "xml":
node = loader(href, parse)
if node is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
node = copy.copy(node)
if e.tail:
node.tail = (node.tail or "") + e.tail
elem[i] = node
elif parse == "text":
text = loader(href, parse, e.get("encoding"))
if text is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
if i:
node = elem[i-1]
node.tail = (node.tail or "") + text + (e.tail or "")
else:
elem.text = (elem.text or "") + text + (e.tail or "")
del elem[i]
continue
else:
raise FatalIncludeError(
"unknown parse type in xi:include tag (%r)" % parse
)
elif e.tag == XINCLUDE_FALLBACK:
raise FatalIncludeError(
"xi:fallback tag must be child of xi:include (%r)" % e.tag
)
else:
include(e, loader)
i = i + 1
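# A minimal usage sketch ("document.xml" is a hypothetical file containing
# xi:include directives):
#
#     from xml.etree import ElementTree, ElementInclude
#
#     tree = ElementTree.parse("document.xml")
#     ElementInclude.include(tree.getroot())
#     ElementTree.dump(tree.getroot())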
| lgpl-3.0 |
dchilds7/Deysha-Star-Formation | vispy/gloo/gl/_es2.py | 21 | 38240 | """
THIS CODE IS AUTO-GENERATED. DO NOT EDIT.
GL ES 2.0 API (via Angle/DirectX on Windows)
"""
import ctypes
from .es2 import _lib
_lib.glActiveTexture.argtypes = ctypes.c_uint,
# void = glActiveTexture(GLenum texture)
def glActiveTexture(texture):
_lib.glActiveTexture(texture)
_lib.glAttachShader.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glAttachShader(GLuint program, GLuint shader)
def glAttachShader(program, shader):
_lib.glAttachShader(program, shader)
_lib.glBindAttribLocation.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_char_p,
# void = glBindAttribLocation(GLuint program, GLuint index, GLchar* name)
def glBindAttribLocation(program, index, name):
name = ctypes.c_char_p(name.encode('utf-8'))
res = _lib.glBindAttribLocation(program, index, name)
_lib.glBindBuffer.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBindBuffer(GLenum target, GLuint buffer)
def glBindBuffer(target, buffer):
_lib.glBindBuffer(target, buffer)
_lib.glBindFramebuffer.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBindFramebuffer(GLenum target, GLuint framebuffer)
def glBindFramebuffer(target, framebuffer):
_lib.glBindFramebuffer(target, framebuffer)
_lib.glBindRenderbuffer.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBindRenderbuffer(GLenum target, GLuint renderbuffer)
def glBindRenderbuffer(target, renderbuffer):
_lib.glBindRenderbuffer(target, renderbuffer)
_lib.glBindTexture.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBindTexture(GLenum target, GLuint texture)
def glBindTexture(target, texture):
_lib.glBindTexture(target, texture)
_lib.glBlendColor.argtypes = ctypes.c_float, ctypes.c_float, ctypes.c_float, ctypes.c_float,
# void = glBlendColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha)
def glBlendColor(red, green, blue, alpha):
_lib.glBlendColor(red, green, blue, alpha)
_lib.glBlendEquation.argtypes = ctypes.c_uint,
# void = glBlendEquation(GLenum mode)
def glBlendEquation(mode):
_lib.glBlendEquation(mode)
_lib.glBlendEquationSeparate.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha)
def glBlendEquationSeparate(modeRGB, modeAlpha):
_lib.glBlendEquationSeparate(modeRGB, modeAlpha)
_lib.glBlendFunc.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glBlendFunc(GLenum sfactor, GLenum dfactor)
def glBlendFunc(sfactor, dfactor):
_lib.glBlendFunc(sfactor, dfactor)
_lib.glBlendFuncSeparate.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint,
# void = glBlendFuncSeparate(GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha)
def glBlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha):
_lib.glBlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha)
_lib.glBufferData.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_void_p, ctypes.c_uint,
# void = glBufferData(GLenum target, GLsizeiptr size, GLvoid* data, GLenum usage)
def glBufferData(target, data, usage):
""" Data can be numpy array or the size of data to allocate.
"""
if isinstance(data, int):
size = data
data = ctypes.c_voidp(0)
else:
if not data.flags['C_CONTIGUOUS'] or not data.flags['ALIGNED']:
data = data.copy('C')
data_ = data
size = data_.nbytes
data = data_.ctypes.data
res = _lib.glBufferData(target, size, data, usage)
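# A minimal sketch of the two accepted call styles (34962 and 35044 are the
# standard GL_ARRAY_BUFFER and GL_STATIC_DRAW enums; a current GL context and
# a bound buffer are assumed):
#
#     import numpy as np
#     data = np.zeros(100, dtype=np.float32)
#     glBufferData(34962, data, 35044)   # upload a numpy array
#     glBufferData(34962, 400, 35044)    # or just allocate 400 bytes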
_lib.glBufferSubData.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
# void = glBufferSubData(GLenum target, GLintptr offset, GLsizeiptr size, GLvoid* data)
def glBufferSubData(target, offset, data):
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
size = data_.nbytes
data = data_.ctypes.data
res = _lib.glBufferSubData(target, offset, size, data)
_lib.glCheckFramebufferStatus.argtypes = ctypes.c_uint,
_lib.glCheckFramebufferStatus.restype = ctypes.c_uint
# GLenum = glCheckFramebufferStatus(GLenum target)
def glCheckFramebufferStatus(target):
return _lib.glCheckFramebufferStatus(target)
_lib.glClear.argtypes = ctypes.c_uint,
# void = glClear(GLbitfield mask)
def glClear(mask):
_lib.glClear(mask)
_lib.glClearColor.argtypes = ctypes.c_float, ctypes.c_float, ctypes.c_float, ctypes.c_float,
# void = glClearColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha)
def glClearColor(red, green, blue, alpha):
_lib.glClearColor(red, green, blue, alpha)
_lib.glClearDepthf.argtypes = ctypes.c_float,
# void = glClearDepthf(GLclampf depth)
def glClearDepth(depth):
_lib.glClearDepthf(depth)
_lib.glClearStencil.argtypes = ctypes.c_int,
# void = glClearStencil(GLint s)
def glClearStencil(s):
_lib.glClearStencil(s)
_lib.glColorMask.argtypes = ctypes.c_bool, ctypes.c_bool, ctypes.c_bool, ctypes.c_bool,
# void = glColorMask(GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha)
def glColorMask(red, green, blue, alpha):
_lib.glColorMask(red, green, blue, alpha)
_lib.glCompileShader.argtypes = ctypes.c_uint,
# void = glCompileShader(GLuint shader)
def glCompileShader(shader):
_lib.glCompileShader(shader)
_lib.glCompressedTexImage2D.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
# void = glCompressedTexImage2D(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, GLvoid* data)
def glCompressedTexImage2D(target, level, internalformat, width, height, border, data):
# border = 0 # set in args
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
size = data_.size
data = data_.ctypes.data
res = _lib.glCompressedTexImage2D(target, level, internalformat, width, height, border, size, data)
_lib.glCompressedTexSubImage2D.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_uint, ctypes.c_int, ctypes.c_void_p,
# void = glCompressedTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, GLvoid* data)
def glCompressedTexSubImage2D(target, level, xoffset, yoffset, width, height, format, data):
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
size = data_.size
data = data_.ctypes.data
res = _lib.glCompressedTexSubImage2D(target, level, xoffset, yoffset, width, height, format, size, data)
_lib.glCopyTexImage2D.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
# void = glCopyTexImage2D(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border)
def glCopyTexImage2D(target, level, internalformat, x, y, width, height, border):
_lib.glCopyTexImage2D(target, level, internalformat, x, y, width, height, border)
_lib.glCopyTexSubImage2D.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
# void = glCopyTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height)
def glCopyTexSubImage2D(target, level, xoffset, yoffset, x, y, width, height):
_lib.glCopyTexSubImage2D(target, level, xoffset, yoffset, x, y, width, height)
_lib.glCreateProgram.argtypes = ()
_lib.glCreateProgram.restype = ctypes.c_uint
# GLuint = glCreateProgram()
def glCreateProgram():
return _lib.glCreateProgram()
_lib.glCreateShader.argtypes = ctypes.c_uint,
_lib.glCreateShader.restype = ctypes.c_uint
# GLuint = glCreateShader(GLenum type)
def glCreateShader(type):
return _lib.glCreateShader(type)
_lib.glCullFace.argtypes = ctypes.c_uint,
# void = glCullFace(GLenum mode)
def glCullFace(mode):
_lib.glCullFace(mode)
_lib.glDeleteBuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glDeleteBuffers(GLsizei n, GLuint* buffers)
def glDeleteBuffer(buffer):
n = 1
buffers = (ctypes.c_uint*n)(buffer)
res = _lib.glDeleteBuffers(n, buffers)
_lib.glDeleteFramebuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glDeleteFramebuffers(GLsizei n, GLuint* framebuffers)
def glDeleteFramebuffer(framebuffer):
n = 1
framebuffers = (ctypes.c_uint*n)(framebuffer)
res = _lib.glDeleteFramebuffers(n, framebuffers)
_lib.glDeleteProgram.argtypes = ctypes.c_uint,
# void = glDeleteProgram(GLuint program)
def glDeleteProgram(program):
_lib.glDeleteProgram(program)
_lib.glDeleteRenderbuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glDeleteRenderbuffers(GLsizei n, GLuint* renderbuffers)
def glDeleteRenderbuffer(renderbuffer):
n = 1
renderbuffers = (ctypes.c_uint*n)(renderbuffer)
res = _lib.glDeleteRenderbuffers(n, renderbuffers)
_lib.glDeleteShader.argtypes = ctypes.c_uint,
# void = glDeleteShader(GLuint shader)
def glDeleteShader(shader):
_lib.glDeleteShader(shader)
_lib.glDeleteTextures.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glDeleteTextures(GLsizei n, GLuint* textures)
def glDeleteTexture(texture):
n = 1
textures = (ctypes.c_uint*n)(texture)
res = _lib.glDeleteTextures(n, textures)
_lib.glDepthFunc.argtypes = ctypes.c_uint,
# void = glDepthFunc(GLenum func)
def glDepthFunc(func):
_lib.glDepthFunc(func)
_lib.glDepthMask.argtypes = ctypes.c_bool,
# void = glDepthMask(GLboolean flag)
def glDepthMask(flag):
_lib.glDepthMask(flag)
_lib.glDepthRangef.argtypes = ctypes.c_float, ctypes.c_float,
# void = glDepthRangef(GLclampf zNear, GLclampf zFar)
def glDepthRange(zNear, zFar):
_lib.glDepthRangef(zNear, zFar)
_lib.glDetachShader.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glDetachShader(GLuint program, GLuint shader)
def glDetachShader(program, shader):
_lib.glDetachShader(program, shader)
_lib.glDisable.argtypes = ctypes.c_uint,
# void = glDisable(GLenum cap)
def glDisable(cap):
_lib.glDisable(cap)
_lib.glDisableVertexAttribArray.argtypes = ctypes.c_uint,
# void = glDisableVertexAttribArray(GLuint index)
def glDisableVertexAttribArray(index):
_lib.glDisableVertexAttribArray(index)
_lib.glDrawArrays.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_int,
# void = glDrawArrays(GLenum mode, GLint first, GLsizei count)
def glDrawArrays(mode, first, count):
_lib.glDrawArrays(mode, first, count)
_lib.glDrawElements.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_uint, ctypes.c_void_p,
# void = glDrawElements(GLenum mode, GLsizei count, GLenum type, GLvoid* indices)
def glDrawElements(mode, count, type, offset):
if offset is None:
offset = ctypes.c_void_p(0)
elif isinstance(offset, ctypes.c_void_p):
pass
elif isinstance(offset, (int, ctypes.c_int)):
offset = ctypes.c_void_p(int(offset))
else:
if not offset.flags['C_CONTIGUOUS']:
offset = offset.copy('C')
offset_ = offset
offset = offset.ctypes.data
indices = offset
res = _lib.glDrawElements(mode, count, type, indices)
_lib.glEnable.argtypes = ctypes.c_uint,
# void = glEnable(GLenum cap)
def glEnable(cap):
_lib.glEnable(cap)
_lib.glEnableVertexAttribArray.argtypes = ctypes.c_uint,
# void = glEnableVertexAttribArray(GLuint index)
def glEnableVertexAttribArray(index):
_lib.glEnableVertexAttribArray(index)
_lib.glFinish.argtypes = ()
# void = glFinish()
def glFinish():
_lib.glFinish()
_lib.glFlush.argtypes = ()
# void = glFlush()
def glFlush():
_lib.glFlush()
_lib.glFramebufferRenderbuffer.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint,
# void = glFramebufferRenderbuffer(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer)
def glFramebufferRenderbuffer(target, attachment, renderbuffertarget, renderbuffer):
_lib.glFramebufferRenderbuffer(target, attachment, renderbuffertarget, renderbuffer)
_lib.glFramebufferTexture2D.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_int,
# void = glFramebufferTexture2D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level)
def glFramebufferTexture2D(target, attachment, textarget, texture, level):
_lib.glFramebufferTexture2D(target, attachment, textarget, texture, level)
_lib.glFrontFace.argtypes = ctypes.c_uint,
# void = glFrontFace(GLenum mode)
def glFrontFace(mode):
_lib.glFrontFace(mode)
_lib.glGenBuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glGenBuffers(GLsizei n, GLuint* buffers)
def glCreateBuffer():
n = 1
buffers = (ctypes.c_uint*n)()
res = _lib.glGenBuffers(n, buffers)
return buffers[0]
_lib.glGenFramebuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glGenFramebuffers(GLsizei n, GLuint* framebuffers)
def glCreateFramebuffer():
n = 1
framebuffers = (ctypes.c_uint*n)()
res = _lib.glGenFramebuffers(n, framebuffers)
return framebuffers[0]
_lib.glGenRenderbuffers.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glGenRenderbuffers(GLsizei n, GLuint* renderbuffers)
def glCreateRenderbuffer():
n = 1
renderbuffers = (ctypes.c_uint*n)()
res = _lib.glGenRenderbuffers(n, renderbuffers)
return renderbuffers[0]
_lib.glGenTextures.argtypes = ctypes.c_int, ctypes.POINTER(ctypes.c_uint),
# void = glGenTextures(GLsizei n, GLuint* textures)
def glCreateTexture():
n = 1
textures = (ctypes.c_uint*n)()
res = _lib.glGenTextures(n, textures)
return textures[0]
_lib.glGenerateMipmap.argtypes = ctypes.c_uint,
# void = glGenerateMipmap(GLenum target)
def glGenerateMipmap(target):
_lib.glGenerateMipmap(target)
_lib.glGetActiveAttrib.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_uint), ctypes.c_char_p,
# void = glGetActiveAttrib(GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size, GLenum* type, GLchar* name)
def glGetActiveAttrib(program, index):
bufsize = 256
length = (ctypes.c_int*1)()
size = (ctypes.c_int*1)()
type = (ctypes.c_uint*1)()
name = ctypes.create_string_buffer(bufsize)
res = _lib.glGetActiveAttrib(program, index, bufsize, length, size, type, name)
name = name[:length[0]].decode('utf-8')
return name, size[0], type[0]
_lib.glGetActiveUniform.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_uint), ctypes.c_char_p,
# void = glGetActiveUniform(GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size, GLenum* type, GLchar* name)
def glGetActiveUniform(program, index):
bufsize = 256
length = (ctypes.c_int*1)()
size = (ctypes.c_int*1)()
type = (ctypes.c_uint*1)()
name = ctypes.create_string_buffer(bufsize)
res = _lib.glGetActiveUniform(program, index, bufsize, length, size, type, name)
name = name[:length[0]].decode('utf-8')
return name, size[0], type[0]
_lib.glGetAttachedShaders.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_uint),
# void = glGetAttachedShaders(GLuint program, GLsizei maxcount, GLsizei* count, GLuint* shaders)
def glGetAttachedShaders(program):
maxcount = 256
count = (ctypes.c_int*1)()
shaders = (ctypes.c_uint*maxcount)()
res = _lib.glGetAttachedShaders(program, maxcount, count, shaders)
return tuple(shaders[:count[0]])
_lib.glGetAttribLocation.argtypes = ctypes.c_uint, ctypes.c_char_p,
_lib.glGetAttribLocation.restype = ctypes.c_int
# GLint = glGetAttribLocation(GLuint program, GLchar* name)
def glGetAttribLocation(program, name):
name = ctypes.c_char_p(name.encode('utf-8'))
res = _lib.glGetAttribLocation(program, name)
return res
_lib.glGetBooleanv.argtypes = ctypes.c_uint, ctypes.POINTER(ctypes.c_bool),
# void = glGetBooleanv(GLenum pname, GLboolean* params)
def _glGetBooleanv(pname):
params = (ctypes.c_bool*1)()
res = _lib.glGetBooleanv(pname, params)
return params[0]
_lib.glGetBufferParameteriv.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
# void = glGetBufferParameteriv(GLenum target, GLenum pname, GLint* params)
def glGetBufferParameter(target, pname):
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
res = _lib.glGetBufferParameteriv(target, pname, params)
return params[0]
_lib.glGetError.argtypes = ()
_lib.glGetError.restype = ctypes.c_uint
# GLenum = glGetError()
def glGetError():
return _lib.glGetError()
_lib.glGetFloatv.argtypes = ctypes.c_uint, ctypes.POINTER(ctypes.c_float),
# void = glGetFloatv(GLenum pname, GLfloat* params)
def _glGetFloatv(pname):
n = 16
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
res = _lib.glGetFloatv(pname, params)
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
_lib.glGetFramebufferAttachmentParameteriv.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
# void = glGetFramebufferAttachmentParameteriv(GLenum target, GLenum attachment, GLenum pname, GLint* params)
def glGetFramebufferAttachmentParameter(target, attachment, pname):
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
res = _lib.glGetFramebufferAttachmentParameteriv(target, attachment, pname, params)
return params[0]
_lib.glGetIntegerv.argtypes = ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
# void = glGetIntegerv(GLenum pname, GLint* params)
def _glGetIntegerv(pname):
n = 16
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*n)(*[d for i in range(n)])
res = _lib.glGetIntegerv(pname, params)
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
_lib.glGetProgramInfoLog.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.c_char_p,
# void = glGetProgramInfoLog(GLuint program, GLsizei bufsize, GLsizei* length, GLchar* infolog)
def glGetProgramInfoLog(program):
bufsize = 1024
length = (ctypes.c_int*1)()
infolog = ctypes.create_string_buffer(bufsize)
res = _lib.glGetProgramInfoLog(program, bufsize, length, infolog)
return infolog[:length[0]].decode('utf-8')
_lib.glGetProgramiv.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
# void = glGetProgramiv(GLuint program, GLenum pname, GLint* params)
def glGetProgramParameter(program, pname):
params = (ctypes.c_int*1)()
res = _lib.glGetProgramiv(program, pname, params)
return params[0]
_lib.glGetRenderbufferParameteriv.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
# void = glGetRenderbufferParameteriv(GLenum target, GLenum pname, GLint* params)
def glGetRenderbufferParameter(target, pname):
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
res = _lib.glGetRenderbufferParameteriv(target, pname, params)
return params[0]
_lib.glGetShaderInfoLog.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.c_char_p,
# void = glGetShaderInfoLog(GLuint shader, GLsizei bufsize, GLsizei* length, GLchar* infolog)
def glGetShaderInfoLog(shader):
bufsize = 1024
length = (ctypes.c_int*1)()
infolog = ctypes.create_string_buffer(bufsize)
res = _lib.glGetShaderInfoLog(shader, bufsize, length, infolog)
return infolog[:length[0]].decode('utf-8')
_lib.glGetShaderPrecisionFormat.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
# void = glGetShaderPrecisionFormat(GLenum shadertype, GLenum precisiontype, GLint* range, GLint* precision)
def glGetShaderPrecisionFormat(shadertype, precisiontype):
range = (ctypes.c_int*1)()
precision = (ctypes.c_int*1)()
res = _lib.glGetShaderPrecisionFormat(shadertype, precisiontype, range, precision)
return range[0], precision[0]
_lib.glGetShaderSource.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.c_char_p,
# void = glGetShaderSource(GLuint shader, GLsizei bufsize, GLsizei* length, GLchar* source)
def glGetShaderSource(shader):
bufsize = 1024*1024
length = (ctypes.c_int*1)()
source = (ctypes.c_char*bufsize)()
res = _lib.glGetShaderSource(shader, bufsize, length, source)
return source.value[:length[0]].decode('utf-8')
_lib.glGetShaderiv.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
# void = glGetShaderiv(GLuint shader, GLenum pname, GLint* params)
def glGetShaderParameter(shader, pname):
params = (ctypes.c_int*1)()
res = _lib.glGetShaderiv(shader, pname, params)
return params[0]
_lib.glGetString.argtypes = ctypes.c_uint,
_lib.glGetString.restype = ctypes.c_char_p
# GLubyte* = glGetString(GLenum name)
def glGetParameter(pname):
if pname in [33902, 33901, 32773, 3106, 2931, 2928,
2849, 32824, 10752, 32938]:
# GL_ALIASED_LINE_WIDTH_RANGE GL_ALIASED_POINT_SIZE_RANGE
# GL_BLEND_COLOR GL_COLOR_CLEAR_VALUE GL_DEPTH_CLEAR_VALUE
# GL_DEPTH_RANGE GL_LINE_WIDTH GL_POLYGON_OFFSET_FACTOR
# GL_POLYGON_OFFSET_UNITS GL_SAMPLE_COVERAGE_VALUE
return _glGetFloatv(pname)
elif pname in [7936, 7937, 7938, 35724, 7939]:
# GL_VENDOR, GL_RENDERER, GL_VERSION, GL_SHADING_LANGUAGE_VERSION,
# GL_EXTENSIONS are strings
pass # string handled below
else:
return _glGetIntegerv(pname)
name = pname
res = _lib.glGetString(name)
return ctypes.string_at(res).decode('utf-8') if res else ''
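# A minimal usage sketch (7938 is GL_VERSION and 3379 is GL_MAX_TEXTURE_SIZE;
# a current GL context is assumed):
#
#     print(glGetParameter(7938))  # e.g. the version string
#     print(glGetParameter(3379))  # e.g. an integer limit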
_lib.glGetTexParameterfv.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(ctypes.c_float),
# void = glGetTexParameterfv(GLenum target, GLenum pname, GLfloat* params)
def glGetTexParameter(target, pname):
d = float('Inf')
params = (ctypes.c_float*1)(d)
res = _lib.glGetTexParameterfv(target, pname, params)
return params[0]
_lib.glGetUniformfv.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_float),
# void = glGetUniformfv(GLuint program, GLint location, GLfloat* params)
def glGetUniform(program, location):
n = 16
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
res = _lib.glGetUniformfv(program, location, params)
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
_lib.glGetUniformLocation.argtypes = ctypes.c_uint, ctypes.c_char_p,
_lib.glGetUniformLocation.restype = ctypes.c_int
# GLint = glGetUniformLocation(GLuint program, GLchar* name)
def glGetUniformLocation(program, name):
name = ctypes.c_char_p(name.encode('utf-8'))
res = _lib.glGetUniformLocation(program, name)
return res
_lib.glGetVertexAttribfv.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(ctypes.c_float),
# void = glGetVertexAttribfv(GLuint index, GLenum pname, GLfloat* params)
def glGetVertexAttrib(index, pname):
n = 4
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
res = _lib.glGetVertexAttribfv(index, pname, params)
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
_lib.glGetVertexAttribPointerv.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(ctypes.c_void_p),
# void = glGetVertexAttribPointerv(GLuint index, GLenum pname, GLvoid** pointer)
def glGetVertexAttribOffset(index, pname):
pointer = (ctypes.c_void_p*1)()
res = _lib.glGetVertexAttribPointerv(index, pname, pointer)
return pointer[0] or 0
_lib.glHint.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glHint(GLenum target, GLenum mode)
def glHint(target, mode):
_lib.glHint(target, mode)
_lib.glIsBuffer.argtypes = ctypes.c_uint,
_lib.glIsBuffer.restype = ctypes.c_bool
# GLboolean = glIsBuffer(GLuint buffer)
def glIsBuffer(buffer):
return _lib.glIsBuffer(buffer)
_lib.glIsEnabled.argtypes = ctypes.c_uint,
_lib.glIsEnabled.restype = ctypes.c_bool
# GLboolean = glIsEnabled(GLenum cap)
def glIsEnabled(cap):
return _lib.glIsEnabled(cap)
_lib.glIsFramebuffer.argtypes = ctypes.c_uint,
_lib.glIsFramebuffer.restype = ctypes.c_bool
# GLboolean = glIsFramebuffer(GLuint framebuffer)
def glIsFramebuffer(framebuffer):
return _lib.glIsFramebuffer(framebuffer)
_lib.glIsProgram.argtypes = ctypes.c_uint,
_lib.glIsProgram.restype = ctypes.c_bool
# GLboolean = glIsProgram(GLuint program)
def glIsProgram(program):
return _lib.glIsProgram(program)
_lib.glIsRenderbuffer.argtypes = ctypes.c_uint,
_lib.glIsRenderbuffer.restype = ctypes.c_bool
# GLboolean = glIsRenderbuffer(GLuint renderbuffer)
def glIsRenderbuffer(renderbuffer):
return _lib.glIsRenderbuffer(renderbuffer)
_lib.glIsShader.argtypes = ctypes.c_uint,
_lib.glIsShader.restype = ctypes.c_bool
# GLboolean = glIsShader(GLuint shader)
def glIsShader(shader):
return _lib.glIsShader(shader)
_lib.glIsTexture.argtypes = ctypes.c_uint,
_lib.glIsTexture.restype = ctypes.c_bool
# GLboolean = glIsTexture(GLuint texture)
def glIsTexture(texture):
return _lib.glIsTexture(texture)
_lib.glLineWidth.argtypes = ctypes.c_float,
# void = glLineWidth(GLfloat width)
def glLineWidth(width):
_lib.glLineWidth(width)
_lib.glLinkProgram.argtypes = ctypes.c_uint,
# void = glLinkProgram(GLuint program)
def glLinkProgram(program):
_lib.glLinkProgram(program)
_lib.glPixelStorei.argtypes = ctypes.c_uint, ctypes.c_int,
# void = glPixelStorei(GLenum pname, GLint param)
def glPixelStorei(pname, param):
_lib.glPixelStorei(pname, param)
_lib.glPolygonOffset.argtypes = ctypes.c_float, ctypes.c_float,
# void = glPolygonOffset(GLfloat factor, GLfloat units)
def glPolygonOffset(factor, units):
_lib.glPolygonOffset(factor, units)
_lib.glReadPixels.argtypes = ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_uint, ctypes.c_uint, ctypes.c_void_p,
# void = glReadPixels(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLvoid* pixels)
def glReadPixels(x, y, width, height, format, type):
# GL_ALPHA, GL_RGB, GL_RGBA
t = {6406:1, 6407:3, 6408:4}[format]
# GL_UNSIGNED_BYTE, GL_FLOAT
nb = {5121:1, 5126:4}[type]
size = int(width*height*t*nb)
pixels = ctypes.create_string_buffer(size)
res = _lib.glReadPixels(x, y, width, height, format, type, pixels)
return pixels[:]
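# A minimal sketch of turning the returned bytes into an image array
# (6408 is GL_RGBA and 5121 is GL_UNSIGNED_BYTE; a current GL context is
# assumed):
#
#     import numpy as np
#     width, height = 640, 480
#     raw = glReadPixels(0, 0, width, height, 6408, 5121)
#     image = np.frombuffer(raw, dtype=np.uint8).reshape(height, width, 4)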
_lib.glRenderbufferStorage.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_int, ctypes.c_int,
# void = glRenderbufferStorage(GLenum target, GLenum internalformat, GLsizei width, GLsizei height)
def glRenderbufferStorage(target, internalformat, width, height):
_lib.glRenderbufferStorage(target, internalformat, width, height)
_lib.glSampleCoverage.argtypes = ctypes.c_float, ctypes.c_bool,
# void = glSampleCoverage(GLclampf value, GLboolean invert)
def glSampleCoverage(value, invert):
_lib.glSampleCoverage(value, invert)
_lib.glScissor.argtypes = ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
# void = glScissor(GLint x, GLint y, GLsizei width, GLsizei height)
def glScissor(x, y, width, height):
_lib.glScissor(x, y, width, height)
_lib.glShaderSource.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_int),
# void = glShaderSource(GLuint shader, GLsizei count, GLchar** string, GLint* length)
def glShaderSource(shader, source):
# Some implementations do not like getting a list of single chars
if isinstance(source, (tuple, list)):
strings = [s for s in source]
else:
strings = [source]
count = len(strings)
string = (ctypes.c_char_p*count)(*[s.encode('utf-8') for s in strings])
length = (ctypes.c_int*count)(*[len(s) for s in strings])
res = _lib.glShaderSource(shader, count, string, length)
_lib.glStencilFunc.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_uint,
# void = glStencilFunc(GLenum func, GLint ref, GLuint mask)
def glStencilFunc(func, ref, mask):
_lib.glStencilFunc(func, ref, mask)
_lib.glStencilFuncSeparate.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_int, ctypes.c_uint,
# void = glStencilFuncSeparate(GLenum face, GLenum func, GLint ref, GLuint mask)
def glStencilFuncSeparate(face, func, ref, mask):
_lib.glStencilFuncSeparate(face, func, ref, mask)
_lib.glStencilMask.argtypes = ctypes.c_uint,
# void = glStencilMask(GLuint mask)
def glStencilMask(mask):
_lib.glStencilMask(mask)
_lib.glStencilMaskSeparate.argtypes = ctypes.c_uint, ctypes.c_uint,
# void = glStencilMaskSeparate(GLenum face, GLuint mask)
def glStencilMaskSeparate(face, mask):
_lib.glStencilMaskSeparate(face, mask)
_lib.glStencilOp.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_uint,
# void = glStencilOp(GLenum fail, GLenum zfail, GLenum zpass)
def glStencilOp(fail, zfail, zpass):
_lib.glStencilOp(fail, zfail, zpass)
_lib.glStencilOpSeparate.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint,
# void = glStencilOpSeparate(GLenum face, GLenum fail, GLenum zfail, GLenum zpass)
def glStencilOpSeparate(face, fail, zfail, zpass):
_lib.glStencilOpSeparate(face, fail, zfail, zpass)
_lib.glTexImage2D.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_uint, ctypes.c_uint, ctypes.c_void_p,
# void = glTexImage2D(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, GLvoid* pixels)
def glTexImage2D(target, level, internalformat, format, type, pixels):
border = 0
if isinstance(pixels, (tuple, list)):
height, width = pixels
pixels = ctypes.c_void_p(0)
pixels = None
else:
if not pixels.flags['C_CONTIGUOUS']:
pixels = pixels.copy('C')
pixels_ = pixels
pixels = pixels_.ctypes.data
height, width = pixels_.shape[:2]
res = _lib.glTexImage2D(target, level, internalformat, width, height, border, format, type, pixels)
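# A minimal sketch of the two accepted forms of "pixels" (3553 is
# GL_TEXTURE_2D, 6408 GL_RGBA, 5121 GL_UNSIGNED_BYTE; a current GL context and
# a bound texture are assumed):
#
#     import numpy as np
#     img = np.zeros((32, 32, 4), dtype=np.uint8)
#     glTexImage2D(3553, 0, 6408, 6408, 5121, img)       # upload pixel data
#     glTexImage2D(3553, 0, 6408, 6408, 5121, (32, 32))  # allocate only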
_lib.glTexParameterf.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_float,
def glTexParameterf(target, pname, param):
_lib.glTexParameterf(target, pname, param)
_lib.glTexParameteri.argtypes = ctypes.c_uint, ctypes.c_uint, ctypes.c_int,
def glTexParameteri(target, pname, param):
_lib.glTexParameteri(target, pname, param)
_lib.glTexSubImage2D.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_uint, ctypes.c_uint, ctypes.c_void_p,
# void = glTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, GLvoid* pixels)
def glTexSubImage2D(target, level, xoffset, yoffset, format, type, pixels):
if not pixels.flags['C_CONTIGUOUS']:
pixels = pixels.copy('C')
pixels_ = pixels
pixels = pixels_.ctypes.data
height, width = pixels_.shape[:2]
res = _lib.glTexSubImage2D(target, level, xoffset, yoffset, width, height, format, type, pixels)
_lib.glUniform1f.argtypes = ctypes.c_int, ctypes.c_float,
def glUniform1f(location, v1):
_lib.glUniform1f(location, v1)
_lib.glUniform2f.argtypes = ctypes.c_int, ctypes.c_float, ctypes.c_float,
def glUniform2f(location, v1, v2):
_lib.glUniform2f(location, v1, v2)
_lib.glUniform3f.argtypes = ctypes.c_int, ctypes.c_float, ctypes.c_float, ctypes.c_float,
def glUniform3f(location, v1, v2, v3):
_lib.glUniform3f(location, v1, v2, v3)
_lib.glUniform4f.argtypes = ctypes.c_int, ctypes.c_float, ctypes.c_float, ctypes.c_float, ctypes.c_float,
def glUniform4f(location, v1, v2, v3, v4):
_lib.glUniform4f(location, v1, v2, v3, v4)
_lib.glUniform1i.argtypes = ctypes.c_int, ctypes.c_int,
def glUniform1i(location, v1):
_lib.glUniform1i(location, v1)
_lib.glUniform2i.argtypes = ctypes.c_int, ctypes.c_int, ctypes.c_int,
def glUniform2i(location, v1, v2):
_lib.glUniform2i(location, v1, v2)
_lib.glUniform3i.argtypes = ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
def glUniform3i(location, v1, v2, v3):
_lib.glUniform3i(location, v1, v2, v3)
_lib.glUniform4i.argtypes = ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
def glUniform4i(location, v1, v2, v3, v4):
_lib.glUniform4i(location, v1, v2, v3, v4)
_lib.glUniform1fv.argtypes = ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float),
def glUniform1fv(location, count, values):
values = [float(val) for val in values]
values = (ctypes.c_float*len(values))(*values)
_lib.glUniform1fv(location, count, values)
_lib.glUniform2fv.argtypes = ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float),
def glUniform2fv(location, count, values):
values = [float(val) for val in values]
values = (ctypes.c_float*len(values))(*values)
_lib.glUniform2fv(location, count, values)
_lib.glUniform3fv.argtypes = ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float),
def glUniform3fv(location, count, values):
values = [float(val) for val in values]
values = (ctypes.c_float*len(values))(*values)
_lib.glUniform3fv(location, count, values)
_lib.glUniform4fv.argtypes = ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float),
def glUniform4fv(location, count, values):
values = [float(val) for val in values]
values = (ctypes.c_float*len(values))(*values)
_lib.glUniform4fv(location, count, values)
_lib.glUniform1iv.argtypes = ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int),
def glUniform1iv(location, count, values):
values = [int(val) for val in values]
values = (ctypes.c_int*len(values))(*values)
_lib.glUniform1iv(location, count, values)
_lib.glUniform2iv.argtypes = ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int),
def glUniform2iv(location, count, values):
values = [int(val) for val in values]
values = (ctypes.c_int*len(values))(*values)
_lib.glUniform2iv(location, count, values)
_lib.glUniform3iv.argtypes = ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int),
def glUniform3iv(location, count, values):
values = [int(val) for val in values]
values = (ctypes.c_int*len(values))(*values)
_lib.glUniform3iv(location, count, values)
_lib.glUniform4iv.argtypes = ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int),
def glUniform4iv(location, count, values):
values = [int(val) for val in values]
values = (ctypes.c_int*len(values))(*values)
_lib.glUniform4iv(location, count, values)
_lib.glUniformMatrix2fv.argtypes = ctypes.c_int, ctypes.c_int, ctypes.c_bool, ctypes.POINTER(ctypes.c_float),
def glUniformMatrix2fv(location, count, transpose, values):
if not values.flags["C_CONTIGUOUS"]:
values = values.copy()
assert values.dtype.name == "float32"
values_ = values
values = values_.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
_lib.glUniformMatrix2fv(location, count, transpose, values)
_lib.glUniformMatrix3fv.argtypes = ctypes.c_int, ctypes.c_int, ctypes.c_bool, ctypes.POINTER(ctypes.c_float),
def glUniformMatrix3fv(location, count, transpose, values):
if not values.flags["C_CONTIGUOUS"]:
values = values.copy()
assert values.dtype.name == "float32"
values_ = values
values = values_.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
_lib.glUniformMatrix3fv(location, count, transpose, values)
_lib.glUniformMatrix4fv.argtypes = ctypes.c_int, ctypes.c_int, ctypes.c_bool, ctypes.POINTER(ctypes.c_float),
def glUniformMatrix4fv(location, count, transpose, values):
if not values.flags["C_CONTIGUOUS"]:
values = values.copy()
assert values.dtype.name == "float32"
values_ = values
values = values_.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
_lib.glUniformMatrix4fv(location, count, transpose, values)
_lib.glUseProgram.argtypes = ctypes.c_uint,
# void = glUseProgram(GLuint program)
def glUseProgram(program):
_lib.glUseProgram(program)
_lib.glValidateProgram.argtypes = ctypes.c_uint,
# void = glValidateProgram(GLuint program)
def glValidateProgram(program):
_lib.glValidateProgram(program)
_lib.glVertexAttrib1f.argtypes = ctypes.c_uint, ctypes.c_float,
def glVertexAttrib1f(index, v1):
_lib.glVertexAttrib1f(index, v1)
_lib.glVertexAttrib2f.argtypes = ctypes.c_uint, ctypes.c_float, ctypes.c_float,
def glVertexAttrib2f(index, v1, v2):
_lib.glVertexAttrib2f(index, v1, v2)
_lib.glVertexAttrib3f.argtypes = ctypes.c_uint, ctypes.c_float, ctypes.c_float, ctypes.c_float,
def glVertexAttrib3f(index, v1, v2, v3):
_lib.glVertexAttrib3f(index, v1, v2, v3)
_lib.glVertexAttrib4f.argtypes = ctypes.c_uint, ctypes.c_float, ctypes.c_float, ctypes.c_float, ctypes.c_float,
def glVertexAttrib4f(index, v1, v2, v3, v4):
_lib.glVertexAttrib4f(index, v1, v2, v3, v4)
_lib.glVertexAttribPointer.argtypes = ctypes.c_uint, ctypes.c_int, ctypes.c_uint, ctypes.c_bool, ctypes.c_int, ctypes.c_void_p,
# void = glVertexAttribPointer(GLuint indx, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLvoid* ptr)
def glVertexAttribPointer(indx, size, type, normalized, stride, offset):
if offset is None:
offset = ctypes.c_void_p(0)
elif isinstance(offset, ctypes.c_void_p):
pass
elif isinstance(offset, (int, ctypes.c_int)):
offset = ctypes.c_void_p(int(offset))
else:
if not offset.flags['C_CONTIGUOUS']:
offset = offset.copy('C')
offset_ = offset
offset = offset.ctypes.data
# We need to ensure that the data exists at draw time :(
# PyOpenGL does this too
key = '_vert_attr_'+str(indx)
setattr(glVertexAttribPointer, key, offset_)
ptr = offset
res = _lib.glVertexAttribPointer(indx, size, type, normalized, stride, ptr)
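# Illustrative usage sketch (not from the original source): with a buffer bound to
# GL_ARRAY_BUFFER, offset is typically an integer byte offset into that buffer, e.g.
#   glVertexAttribPointer(loc, 3, GL_FLOAT, False, 6 * 4, 0)
# loc and the GL_* constants are assumed to be provided by the calling code.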
_lib.glViewport.argtypes = ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
# void = glViewport(GLint x, GLint y, GLsizei width, GLsizei height)
def glViewport(x, y, width, height):
_lib.glViewport(x, y, width, height)
| bsd-3-clause |
mlperf/training_results_v0.6 | NVIDIA/benchmarks/resnet/implementations/mxnet/common/dali.py | 1 | 17478 | import warnings
from nvidia import dali
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.plugin.mxnet import DALIClassificationIterator
import horovod.mxnet as hvd
from mlperf_compliance import constants as mlperf_constants
from mlperf_log_utils import mx_resnet_print
import os
import tempfile
def add_dali_args(parser):
    group = parser.add_argument_group('DALI', 'pipeline and augmentation')
group.add_argument('--data-train', type=str, help='the training data')
group.add_argument('--data-train-idx', type=str, default='', help='the index of training data')
group.add_argument('--data-val', type=str, help='the validation data')
group.add_argument('--data-val-idx', type=str, default='', help='the index of validation data')
group.add_argument('--use-dali', action='store_true',
                       help='use DALI pipeline and augmentation')
group.add_argument('--max-random-area', type=float, default=1,
help='max area to crop in random resized crop, whose range is [0, 1]')
group.add_argument('--min-random-area', type=float, default=1,
help='min area to crop in random resized crop, whose range is [0, 1]')
group.add_argument('--separ-val', action='store_true',
help='each process will perform independent validation on whole val-set')
group.add_argument('--min-random-aspect-ratio', type=float, default=3./4.,
help='min value of aspect ratio, whose value is either None or a positive value.')
group.add_argument('--max-random-aspect-ratio', type=float, default=4./3.,
help='max value of aspect ratio. If min_random_aspect_ratio is None, '
'the aspect ratio range is [1-max_random_aspect_ratio, '
'1+max_random_aspect_ratio], otherwise it is '
'[min_random_aspect_ratio, max_random_aspect_ratio].')
    group.add_argument('--dali-threads', type=int, default=3, help="number of threads " +\
"per GPU for DALI")
group.add_argument('--image-shape', type=str,
help='the image shape feed into the network, e.g. (3,224,224)')
group.add_argument('--num-examples', type=int, help='the number of training examples')
group.add_argument('--dali-prefetch-queue', type=int, default=3, help="DALI prefetch queue depth")
group.add_argument('--dali-nvjpeg-memory-padding', type=int, default=16, help="Memory padding value for nvJPEG (in MB)")
group.add_argument('--dali-roi-decode', default=False, type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
help='use ROI decode, available starting in DALI 0.8')
group.add_argument('--dali-cache-size', type=int, default=0,
help='Cache decoded images with static shards with the specified cache size '
' (in MB), available starting in DALI 0.8')
group.add_argument('--lazy_init_sanity', action='store_true',
                       help='makes sure that data is not touched during the lazy init; '
                            'users need to clean up the symlinks created under /tmp')
return parser
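# Illustrative usage sketch (assumes argparse is imported by the caller):
#   parser = argparse.ArgumentParser()
#   add_dali_args(parser)
#   args = parser.parse_args()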
_mean_pixel = [255 * x for x in (0.485, 0.456, 0.406)]
_std_pixel = [255 * x for x in (0.229, 0.224, 0.225)]
class HybridTrainPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, rec_path, idx_path,
shard_id, num_shards, crop_shape,
min_random_area, max_random_area,
min_random_aspect_ratio, max_random_aspect_ratio,
nvjpeg_padding, prefetch_queue=3,
seed=12,
output_layout=types.NCHW, pad_output=True, dtype='float16',
mlperf_print=True, use_roi_decode=False, cache_size=0):
super(HybridTrainPipe, self).__init__(
batch_size, num_threads, device_id,
seed = seed + device_id,
prefetch_queue_depth = prefetch_queue)
if cache_size > 0:
self.input = ops.MXNetReader(path = [rec_path], index_path=[idx_path],
random_shuffle=True, shard_id=shard_id, num_shards=num_shards,
stick_to_shard=True, lazy_init=True, skip_cached_images=True)
else: # stick_to_shard might not exist in this version of DALI.
self.input = ops.MXNetReader(path = [rec_path], index_path=[idx_path],
random_shuffle=True, shard_id=shard_id, num_shards=num_shards)
if use_roi_decode and cache_size == 0:
self.decode = ops.nvJPEGDecoderRandomCrop(device = "mixed", output_type = types.RGB,
device_memory_padding = nvjpeg_padding,
host_memory_padding = nvjpeg_padding,
random_area = [
min_random_area,
max_random_area],
random_aspect_ratio = [
min_random_aspect_ratio,
max_random_aspect_ratio])
self.rrc = ops.Resize(device = "gpu", resize_x=crop_shape[0], resize_y=crop_shape[1])
else:
if cache_size > 0:
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB,
device_memory_padding = nvjpeg_padding,
host_memory_padding = nvjpeg_padding,
cache_type='threshold',
cache_size=cache_size,
cache_threshold=0,
cache_debug=False)
else:
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB,
device_memory_padding = nvjpeg_padding,
host_memory_padding = nvjpeg_padding)
self.rrc = ops.RandomResizedCrop(device = "gpu",
random_area = [
min_random_area,
max_random_area],
random_aspect_ratio = [
min_random_aspect_ratio,
max_random_aspect_ratio],
size = crop_shape)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT16 if dtype == 'float16' else types.FLOAT,
output_layout = output_layout,
crop = crop_shape,
pad_output = pad_output,
image_type = types.RGB,
mean = _mean_pixel,
std = _std_pixel)
self.coin = ops.CoinFlip(probability = 0.5)
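        # The coin flip result is passed as the mirror argument in define_graph,
        # so each image is mirrored horizontally with probability 0.5.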
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
images = self.rrc(images)
output = self.cmnp(images, mirror = rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, rec_path, idx_path,
shard_id, num_shards, crop_shape,
nvjpeg_padding, prefetch_queue=3,
seed=12, resize_shp=None,
output_layout=types.NCHW, pad_output=True, dtype='float16',
mlperf_print=True, cache_size=0):
super(HybridValPipe, self).__init__(
batch_size, num_threads, device_id,
seed = seed + device_id,
prefetch_queue_depth = prefetch_queue)
if cache_size > 0:
self.input = ops.MXNetReader(path = [rec_path], index_path=[idx_path],
random_shuffle=False, shard_id=shard_id, num_shards=num_shards,
stick_to_shard=True, lazy_init=True, skip_cached_images=True)
else: # stick_to_shard might not exist in this version of DALI.
self.input = ops.MXNetReader(path = [rec_path], index_path=[idx_path],
random_shuffle=False, shard_id=shard_id, num_shards=num_shards)
if cache_size > 0:
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB,
device_memory_padding = nvjpeg_padding,
host_memory_padding = nvjpeg_padding,
cache_type='threshold',
cache_size=cache_size,
cache_threshold=0,
cache_debug=False)
else:
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB,
device_memory_padding = nvjpeg_padding,
host_memory_padding = nvjpeg_padding)
self.resize = ops.Resize(device = "gpu", resize_shorter=resize_shp) if resize_shp else None
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT16 if dtype == 'float16' else types.FLOAT,
output_layout = output_layout,
crop = crop_shape,
pad_output = pad_output,
image_type = types.RGB,
mean = _mean_pixel,
std = _std_pixel)
def define_graph(self):
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
if self.resize:
images = self.resize(images)
output = self.cmnp(images)
return [output, self.labels]
def _get_rank_and_worker_count(args, kv):
if 'horovod' in args.kv_store:
rank = hvd.rank()
num_workers = hvd.size()
else:
rank = kv.rank if kv else 0
num_workers = kv.num_workers if kv else 1
return (rank, num_workers)
def link_to_tmp_file(src, dst):
    # tempfile.mkstemp creates a file whose name ends with _dali,
    # so appending _tmp keeps the name unique
tmp = dst + '_tmp'
os.symlink(src, tmp)
os.rename(tmp, dst)
def get_tmp_file():
fd, path = tempfile.mkstemp(suffix='_dali')
os.close(fd)
return path
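# Illustrative usage sketch (paths are hypothetical): these helpers swap a placeholder
# path for the real data, e.g.
#   placeholder = get_tmp_file()                        # handed to DALI at build time
#   link_to_tmp_file('/data/train.rec', placeholder)    # later points it at the real data
# The os.rename of the symlink makes the final switch atomic on POSIX filesystems.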
def build_input_pipeline(args, kv=None):
    # resize is the default base length of the shorter edge for the dataset;
    # all images will be reshaped to this size
    resize = int(args.resize)
    # target_shape is the final shape of the images pipelined to the network;
    # all images will be cropped to this size
target_shape = tuple([int(l) for l in args.image_shape.split(',')])
pad_output = target_shape[0] == 4
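    # pad_output is True when the network expects 4 input channels; DALI then pads the
    # decoded RGB images with an extra channel in CropMirrorNormalize.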
    gpus = list(map(int, filter(None, args.gpus.split(',')))) # filter out any empty strings
batch_size = args.batch_size//len(gpus)
mx_resnet_print(
key=mlperf_constants.MODEL_BN_SPAN,
val=batch_size)
num_threads = args.dali_threads
# the input_layout w.r.t. the model is the output_layout of the image pipeline
output_layout = types.NHWC if args.input_layout == 'NHWC' else types.NCHW
(rank, num_workers) = _get_rank_and_worker_count(args, kv)
data_paths = {}
if args.dali_cache_size > 0 and args.lazy_init_sanity:
data_paths["train_data_tmp"] = get_tmp_file()
data_paths["train_idx_tmp"] = get_tmp_file()
data_paths["val_data_tmp"] = get_tmp_file()
data_paths["val_idx_tmp"] = get_tmp_file()
else:
data_paths["train_data_tmp"] = args.data_train
data_paths["train_idx_tmp"] = args.data_train_idx
data_paths["val_data_tmp"] = args.data_val
data_paths["val_idx_tmp"] = args.data_val_idx
trainpipes = [HybridTrainPipe(batch_size = batch_size,
num_threads = num_threads,
device_id = gpu_id,
rec_path = data_paths["train_data_tmp"],
idx_path = data_paths["train_idx_tmp"],
shard_id = gpus.index(gpu_id) + len(gpus)*rank,
num_shards = len(gpus)*num_workers,
crop_shape = target_shape[1:],
min_random_area = args.min_random_area,
max_random_area = args.max_random_area,
min_random_aspect_ratio = args.min_random_aspect_ratio,
max_random_aspect_ratio = args.max_random_aspect_ratio,
nvjpeg_padding = args.dali_nvjpeg_memory_padding * 1024 * 1024,
prefetch_queue = args.dali_prefetch_queue,
seed = args.seed,
output_layout = output_layout,
pad_output = pad_output,
dtype = args.dtype,
mlperf_print = gpu_id == gpus[0],
use_roi_decode = args.dali_roi_decode,
cache_size = args.dali_cache_size) for gpu_id in gpus]
valpipes = [HybridValPipe(batch_size = batch_size,
num_threads = num_threads,
device_id = gpu_id,
rec_path = data_paths["val_data_tmp"],
idx_path = data_paths["val_idx_tmp"],
shard_id = 0 if args.separ_val
else gpus.index(gpu_id) + len(gpus)*rank,
num_shards = 1 if args.separ_val else len(gpus)*num_workers,
crop_shape = target_shape[1:],
nvjpeg_padding = args.dali_nvjpeg_memory_padding * 1024 * 1024,
prefetch_queue = args.dali_prefetch_queue,
seed = args.seed,
resize_shp = resize,
output_layout = output_layout,
pad_output = pad_output,
dtype = args.dtype,
mlperf_print = gpu_id == gpus[0],
cache_size = args.dali_cache_size) for gpu_id in gpus] if args.data_val else None
[trainpipe.build() for trainpipe in trainpipes]
if args.data_val:
[valpipe.build() for valpipe in valpipes]
return lambda args, kv: get_rec_iter(args, trainpipes, valpipes, data_paths, kv)
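# Note: a closure is returned so that the (expensive) pipeline build happens once here,
# while get_rec_iter below can later link in the real data and wrap the pre-built pipes
# in DALI iterators.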
def get_rec_iter(args, trainpipes, valpipes, data_paths, kv=None):
(rank, num_workers) = _get_rank_and_worker_count(args, kv)
    # Now make the real data available at the paths handed to DALI; this guarantees the data
    # was not touched during the lazy init. Users need to clean up the symlinks created under /tmp.
    # DALIClassificationIterator() performs the init, so the real data must be linked in here.
if args.dali_cache_size > 0 and args.lazy_init_sanity:
link_to_tmp_file(args.data_train, data_paths["train_data_tmp"])
link_to_tmp_file(args.data_train_idx, data_paths["train_idx_tmp"])
link_to_tmp_file(args.data_val, data_paths["val_data_tmp"])
link_to_tmp_file(args.data_val_idx, data_paths["val_idx_tmp"])
dali_train_iter = DALIClassificationIterator(trainpipes, args.num_examples // num_workers)
if args.num_examples < trainpipes[0].epoch_size("Reader"):
warnings.warn("{} training examples will be used, although full training set contains {} examples".format(args.num_examples, trainpipes[0].epoch_size("Reader")))
worker_val_examples = valpipes[0].epoch_size("Reader")
if not args.separ_val:
worker_val_examples = worker_val_examples // num_workers
if rank < valpipes[0].epoch_size("Reader") % num_workers:
worker_val_examples += 1
dali_val_iter = DALIClassificationIterator(valpipes, worker_val_examples, fill_last_batch = False) if args.data_val else None
return dali_train_iter, dali_val_iter
| apache-2.0 |
ergoregion/Rota-Program | Tests/test_Roles.py | 1 | 5042 | __author__ = 'Neil Butcher'
import unittest
from Rota_System import Roles
class RoleTest(unittest.TestCase):
def setUp(self):
Roles.GlobalRoleList.clear()
Roles.GlobalRoleList.add_role(Roles.Role('Baker', 'B', 10))
Roles.GlobalRoleList.add_role(Roles.Role('Steward', 'S', 9))
Roles.GlobalRoleList.add_role(Roles.Role('Fisherman', 'F', 7))
def tearDown(self):
Roles.GlobalRoleList.clear()
def testOuterCreation(self):
baker = Roles.role_from_code('B')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
baker = Roles.role('B')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
baker = Roles.role('Baker')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
def testCreation(self):
baker = Roles.role('B')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
def testListCreation(self):
roles = Roles.RoleList()
roles.all()
baker = roles.role_from_code('B')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
baker = roles.role_from_code('B ')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
def testListInitCreation(self):
roles = Roles.RoleList('B')
self.assertEqual(len(roles.roles), 1, 'should be a role already')
def testLookup(self):
roles = Roles.RoleList()
roles.all()
self.assertTrue(roles.includes('S'), 'All roles should include steward')
self.assertTrue(roles.includes('B'), 'All roles should include baker')
self.assertTrue(roles.includes(Roles.role('B')), 'All roles should include baker as class')
self.assertTrue(roles.includes('S '), 'All roles should include steward')
def testSinglePopulatedList(self):
roles = Roles.RoleList()
roles.populate_from_codes('S')
self.assertFalse(roles.includes('B'), 'this list should not include baker')
self.assertTrue(roles.includes(Roles.role('S')), 'This list should include steward')
self.assertTrue(roles.includes('S '), 'This list should include steward')
def testSingleAddedList(self):
roles = Roles.RoleList()
roles.add_code('S')
self.assertFalse(roles.includes('B'), 'this list should not include baker')
self.assertTrue(roles.includes(Roles.role('S')), 'This list should include steward')
self.assertTrue(roles.includes('S '), 'This list should include steward')
self.assertEqual(roles.number_of_roles(), 1)
roles.add_code('S')
self.assertEqual(roles.number_of_roles(), 1)
roles.add_code('B')
self.assertEqual(roles.number_of_roles(), 2)
def testMultiPopulatedList(self):
roles = Roles.RoleList()
roles.populate_from_codes('F S')
self.assertFalse(roles.includes('B'), 'this list should not include baker')
self.assertTrue(roles.includes(Roles.role('Steward')), 'This list should include steward')
self.assertTrue(roles.includes(Roles.role('F')), 'This list should also include fisherman')
def testMultiAddedList(self):
roles = Roles.RoleList()
roles.add_code('S')
self.assertEqual(roles.number_of_roles(), 1)
roles.add_code('F')
roles.add_code('S')
self.assertEqual(roles.number_of_roles(), 2)
self.assertFalse(roles.includes('B'), 'this list should not include baker')
self.assertTrue(roles.includes(Roles.role('S')), 'This list should include steward')
self.assertTrue(roles.includes(Roles.role('Fisherman')), 'This list should also include fisherman')
def testRemovingList(self):
roles = Roles.RoleList()
roles.add_code('S')
self.assertEqual(roles.number_of_roles(), 1)
roles.remove_code('S')
roles.remove_code('F')
self.assertEqual(roles.number_of_roles(), 0)
roles.add_code(' S')
roles.add_code('F ')
self.assertEqual(roles.number_of_roles(), 2)
roles.remove_code('S')
roles.remove_code('B')
self.assertEqual(roles.number_of_roles(), 1)
roles.add_code('S')
roles.add_code('B')
self.assertEqual(roles.number_of_roles(), 3)
def testOutputList(self):
roles = Roles.RoleList()
roles.populate_from_codes('F S')
self.assertTrue('F' in roles.list_of_codes().split())
self.assertTrue('S' in roles.list_of_codes().split())
self.assertFalse('B' in roles.list_of_codes().split())
roles.all()
self.assertTrue('F' in roles.list_of_codes().split())
self.assertTrue('S' in roles.list_of_codes().split())
self.assertTrue('B' in roles.list_of_codes().split())
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main() | mit |
javierblasco/tensorflow | tensorflow/python/training/gradient_descent.py | 5 | 1615 | """GradientDescent for TensorFlow."""
from tensorflow.python.framework import ops
from tensorflow.python.ops import constant_op
# pylint: disable=unused-import
from tensorflow.python.ops import math_ops
# pylint: enable=unused-import
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class GradientDescentOptimizer(optimizer.Optimizer):
"""Optimizer that implements the gradient descent algorithm.
@@__init__
"""
def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
"""Construct a new gradient descent optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning
rate to use.
      use_locking: If True, use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "GradientDescent".
"""
super(GradientDescentOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
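  # Illustrative usage sketch (not part of the original module); `loss` is an assumed
  # scalar Tensor:
  #   opt = GradientDescentOptimizer(learning_rate=0.1)
  #   train_op = opt.minimize(loss)
  # minimize() comes from the Optimizer base class and dispatches to the
  # _apply_dense/_apply_sparse methods defined below.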
def _apply_dense(self, grad, var):
return training_ops.apply_gradient_descent(
var,
self._learning_rate_tensor,
grad,
use_locking=self._use_locking).op
def _apply_sparse(self, grad, var):
delta = ops.IndexedSlices(grad.values * self._learning_rate_tensor,
grad.indices, grad.dense_shape)
return var.scatter_sub(delta, use_locking=self._use_locking)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
name="learning_rate")
| apache-2.0 |
pizzathief/scipy | scipy/optimize/_shgo_lib/triangulation.py | 8 | 21494 | import numpy as np
import copy
class Complex:
def __init__(self, dim, func, func_args=(), symmetry=False, bounds=None,
g_cons=None, g_args=()):
self.dim = dim
self.bounds = bounds
self.symmetry = symmetry # TODO: Define the functions to be used
# here in init to avoid if checks
self.gen = 0
self.perm_cycle = 0
# Every cell is stored in a list of its generation,
# e.g., the initial cell is stored in self.H[0]
        # first-generation cells are stored in self.H[1], etc.
# When a cell is subgenerated it is removed from this list
self.H = [] # Storage structure of cells
# Cache of all vertices
self.V = VertexCache(func, func_args, bounds, g_cons, g_args)
# Generate n-cube here:
self.n_cube(dim, symmetry=symmetry)
# TODO: Assign functions to a the complex instead
if symmetry:
self.generation_cycle = 1
# self.centroid = self.C0()[-1].x
# self.C0.centroid = self.centroid
else:
self.add_centroid()
self.H.append([])
self.H[0].append(self.C0)
self.hgr = self.C0.homology_group_rank()
self.hgrd = 0 # Complex group rank differential
# self.hgr = self.C0.hg_n
# Build initial graph
self.graph_map()
self.performance = []
self.performance.append(0)
self.performance.append(0)
def __call__(self):
return self.H
def n_cube(self, dim, symmetry=False, printout=False):
"""
Generate the simplicial triangulation of the N-D hypercube
containing 2**n vertices
"""
origin = list(np.zeros(dim, dtype=int))
self.origin = origin
supremum = list(np.ones(dim, dtype=int))
self.supremum = supremum
# tuple versions for indexing
origintuple = tuple(origin)
supremumtuple = tuple(supremum)
x_parents = [origintuple]
if symmetry:
self.C0 = Simplex(0, 0, 0, self.dim) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
i_s = 0
self.perm_symmetry(i_s, x_parents, origin)
self.C0.add_vertex(self.V[supremumtuple])
else:
self.C0 = Cell(0, 0, origin, supremum) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
self.C0.add_vertex(self.V[supremumtuple])
i_parents = []
self.perm(i_parents, x_parents, origin)
if printout:
print("Initial hyper cube:")
for v in self.C0():
v.print_out()
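    # Illustration (not part of the original module): for dim=2 without symmetry,
    # n_cube generates the vertices (0, 0), (1, 0), (0, 1) and (1, 1);
    # add_centroid() then inserts (0.5, 0.5) and connects it to all of them,
    # giving the initial triangulated square stored in self.C0.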
def perm(self, i_parents, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
# Construct required iterator
iter_range = [x for x in range(self.dim) if x not in i_parents]
for i in iter_range:
i2_parents = copy.copy(i_parents)
i2_parents.append(i)
xi2 = copy.copy(xi)
xi2[i] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
# Permutate
self.perm(i2_parents, x_parents2, xi2)
def perm_symmetry(self, i_s, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
xi2 = copy.copy(xi)
xi2[i_s] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
i_s += 1
if i_s == self.dim:
return
# Permutate
self.perm_symmetry(i_s, x_parents2, xi2)
def add_centroid(self):
"""Split the central edge between the origin and supremum of
a cell and add the new vertex to the complex"""
self.centroid = list(
(np.array(self.origin) + np.array(self.supremum)) / 2.0)
self.C0.add_vertex(self.V[tuple(self.centroid)])
self.C0.centroid = self.centroid
# Disconnect origin and supremum
self.V[tuple(self.origin)].disconnect(self.V[tuple(self.supremum)])
# Connect centroid to all other vertices
for v in self.C0():
self.V[tuple(self.centroid)].connect(self.V[tuple(v.x)])
self.centroid_added = True
return
# Construct incidence array:
def incidence(self):
if self.centroid_added:
self.structure = np.zeros([2 ** self.dim + 1, 2 ** self.dim + 1],
dtype=int)
else:
self.structure = np.zeros([2 ** self.dim, 2 ** self.dim],
dtype=int)
for v in self.HC.C0():
for v2 in v.nn:
self.structure[v.index, v2.index] = 1
return
# A more sparse incidence generator:
def graph_map(self):
""" Make a list of size 2**n + 1 where an entry is a vertex
        incidence; each list element contains a list of indexes
        corresponding to that entry's neighbors"""
self.graph = [[v2.index for v2 in v.nn] for v in self.C0()]
# Graph structure method:
# 0. Capture the indices of the initial cell.
# 1. Generate new origin and supremum scalars based on current generation
# 2. Generate a new set of vertices corresponding to a new
# "origin" and "supremum"
# 3. Connected based on the indices of the previous graph structure
# 4. Disconnect the edges in the original cell
def sub_generate_cell(self, C_i, gen):
"""Subgenerate a cell `C_i` of generation `gen` and
homology group rank `hgr`."""
origin_new = tuple(C_i.centroid)
centroid_index = len(C_i()) - 1
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Generate subcubes using every extreme vertex in C_i as a supremum
# and the centroid of C_i as the origin
H_new = [] # list storing all the new cubes split from C_i
for i, v in enumerate(C_i()[:-1]):
supremum = tuple(v.x)
H_new.append(
self.construct_hypercube(origin_new, supremum, gen, C_i.hg_n))
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
if i == centroid_index: # Break out of centroid
break
for j in connections:
C_i()[i].disconnect(C_i()[j])
# Destroy the old cell
if C_i is not self.C0: # Garbage collector does this anyway; not needed
del C_i
# TODO: Recalculate all the homology group ranks of each cell
return H_new
def split_generation(self):
"""
Run sub_generate_cell for every cell in the current complex self.gen
"""
no_splits = False # USED IN SHGO
try:
for c in self.H[self.gen]:
if self.symmetry:
# self.sub_generate_cell_symmetry(c, self.gen + 1)
self.split_simplex_symmetry(c, self.gen + 1)
else:
self.sub_generate_cell(c, self.gen + 1)
except IndexError:
no_splits = True # USED IN SHGO
self.gen += 1
return no_splits # USED IN SHGO
def construct_hypercube(self, origin, supremum, gen, hgr,
printout=False):
"""
Build a hypercube with triangulations symmetric to C0.
Parameters
----------
origin : vec
supremum : vec (tuple)
gen : generation
hgr : parent homology group rank
"""
# Initiate new cell
v_o = np.array(origin)
v_s = np.array(supremum)
C_new = Cell(gen, hgr, origin, supremum)
C_new.centroid = tuple((v_o + v_s) * .5)
# Build new indexed vertex list
V_new = []
for i, v in enumerate(self.C0()[:-1]):
v_x = np.array(v.x)
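            # The next two terms form the affine map x -> origin*(1 - x) + supremum*x,
            # which places each unit-cube vertex of C0 at the matching corner of the
            # new sub-cube.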
sub_cell_t1 = v_o - v_o * v_x
sub_cell_t2 = v_s * v_x
vec = sub_cell_t1 + sub_cell_t2
vec = tuple(vec)
C_new.add_vertex(self.V[vec])
V_new.append(vec)
# Add new centroid
C_new.add_vertex(self.V[C_new.centroid])
V_new.append(C_new.centroid)
# Connect new vertices #TODO: Thread into other loop; no need for V_new
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
for j in connections:
self.V[V_new[i]].connect(self.V[V_new[j]])
if printout:
print("A sub hyper cube with:")
print("origin: {}".format(origin))
print("supremum: {}".format(supremum))
for v in C_new():
v.print_out()
# Append the new cell to the to complex
self.H[gen].append(C_new)
return C_new
def split_simplex_symmetry(self, S, gen):
"""
Split a hypersimplex S into two sub simplices by building a hyperplane
which connects to a new vertex on an edge (the longest edge in
dim = {2, 3}) and every other vertex in the simplex that is not
connected to the edge being split.
This function utilizes the knowledge that the problem is specified
        with symmetric constraints.
        The longest edge is tracked by an ordering of the
        vertices in every simplex; the edge between the first and second
        vertex is the longest edge to be split in the next iteration.
"""
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Find new vertex.
# V_new_x = tuple((np.array(C()[0].x) + np.array(C()[1].x)) / 2.0)
s = S()
firstx = s[0].x
lastx = s[-1].x
V_new = self.V[tuple((np.array(firstx) + np.array(lastx)) / 2.0)]
# Disconnect old longest edge
self.V[firstx].disconnect(self.V[lastx])
# Connect new vertices to all other vertices
for v in s[:]:
v.connect(self.V[V_new.x])
# New "lower" simplex
S_new_l = Simplex(gen, S.hg_n, self.generation_cycle,
self.dim)
S_new_l.add_vertex(s[0])
S_new_l.add_vertex(V_new) # Add new vertex
for v in s[1:-1]: # Add all other vertices
S_new_l.add_vertex(v)
# New "upper" simplex
S_new_u = Simplex(gen, S.hg_n, S.generation_cycle, self.dim)
# First vertex on new long edge
S_new_u.add_vertex(s[S_new_u.generation_cycle + 1])
for v in s[1:-1]: # Remaining vertices
S_new_u.add_vertex(v)
for k, v in enumerate(s[1:-1]): # iterate through inner vertices
if k == S.generation_cycle:
S_new_u.add_vertex(V_new)
else:
S_new_u.add_vertex(v)
S_new_u.add_vertex(s[-1]) # Second vertex on new long edge
self.H[gen].append(S_new_l)
self.H[gen].append(S_new_u)
return
# Plots
def plot_complex(self):
"""
Here, C is the LIST of simplexes S in the
        2- or 3-D complex.
To plot a single simplex S in a set C, use e.g., [C[0]]
"""
from matplotlib import pyplot # type: ignore[import]
if self.dim == 2:
pyplot.figure()
for C in self.H:
for c in C:
for v in c():
if self.bounds is None:
x_a = np.array(v.x, dtype=float)
else:
x_a = np.array(v.x, dtype=float)
for i in range(len(self.bounds)):
x_a[i] = (x_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('v.x_a = {}'.format(x_a))
pyplot.plot([x_a[0]], [x_a[1]], 'o')
xlines = []
ylines = []
for vn in v.nn:
if self.bounds is None:
xn_a = np.array(vn.x, dtype=float)
else:
xn_a = np.array(vn.x, dtype=float)
for i in range(len(self.bounds)):
xn_a[i] = (xn_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('vn.x = {}'.format(vn.x))
xlines.append(xn_a[0])
ylines.append(xn_a[1])
xlines.append(x_a[0])
ylines.append(x_a[1])
pyplot.plot(xlines, ylines)
if self.bounds is None:
pyplot.ylim([-1e-2, 1 + 1e-2])
pyplot.xlim([-1e-2, 1 + 1e-2])
else:
pyplot.ylim(
[self.bounds[1][0] - 1e-2, self.bounds[1][1] + 1e-2])
pyplot.xlim(
[self.bounds[0][0] - 1e-2, self.bounds[0][1] + 1e-2])
pyplot.show()
elif self.dim == 3:
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
for C in self.H:
for c in C:
for v in c():
x = []
y = []
z = []
# logging.info('v.x = {}'.format(v.x))
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
for vn in v.nn:
x.append(vn.x[0])
y.append(vn.x[1])
z.append(vn.x[2])
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
# logging.info('vn.x = {}'.format(vn.x))
ax.plot(x, y, z, label='simplex')
pyplot.show()
else:
print("dimension higher than 3 or wrong complex format")
return
class VertexGroup(object):
def __init__(self, p_gen, p_hgr):
self.p_gen = p_gen # parent generation
self.p_hgr = p_hgr # parent homology group rank
self.hg_n = None
self.hg_d = None
# Maybe add parent homology group rank total history
# This is the sum off all previously split cells
# cumulatively throughout its entire history
self.C = []
def __call__(self):
return self.C
def add_vertex(self, V):
if V not in self.C:
self.C.append(V)
def homology_group_rank(self):
"""
Returns the homology group order of the current cell
"""
if self.hg_n is None:
self.hg_n = sum(1 for v in self.C if v.minimiser())
return self.hg_n
def homology_group_differential(self):
"""
Returns the difference between the current homology group of the
cell and its parent group
"""
        if self.hg_d is None:
            self.hg_d = self.hg_n - self.p_hgr
        return self.hg_d
def polytopial_sperner_lemma(self):
"""
Returns the number of stationary points theoretically contained in the
        cell, based on information currently known about the cell
"""
pass
def print_out(self):
"""
Print the current cell to console
"""
for v in self():
v.print_out()
class Cell(VertexGroup):
"""
Contains a cell that is symmetric to the initial hypercube triangulation
"""
def __init__(self, p_gen, p_hgr, origin, supremum):
super(Cell, self).__init__(p_gen, p_hgr)
self.origin = origin
self.supremum = supremum
self.centroid = None # (Not always used)
# TODO: self.bounds
class Simplex(VertexGroup):
"""
Contains a simplex that is symmetric to the initial symmetry constrained
hypersimplex triangulation
"""
def __init__(self, p_gen, p_hgr, generation_cycle, dim):
super(Simplex, self).__init__(p_gen, p_hgr)
self.generation_cycle = (generation_cycle + 1) % (dim - 1)
class Vertex:
def __init__(self, x, bounds=None, func=None, func_args=(), g_cons=None,
g_cons_args=(), nn=None, index=None):
self.x = x
self.order = sum(x)
x_a = np.array(x, dtype=float)
if bounds is not None:
for i, (lb, ub) in enumerate(bounds):
x_a[i] = x_a[i] * (ub - lb) + lb
# TODO: Make saving the array structure optional
self.x_a = x_a
# Note Vertex is only initiated once for all x so only
# evaluated once
if func is not None:
self.feasible = True
if g_cons is not None:
for g, args in zip(g_cons, g_cons_args):
if g(self.x_a, *args) < 0.0:
self.f = np.inf
self.feasible = False
break
if self.feasible:
self.f = func(x_a, *func_args)
if nn is not None:
self.nn = nn
else:
self.nn = set()
self.fval = None
self.check_min = True
# Index:
if index is not None:
self.index = index
def __hash__(self):
return hash(self.x)
def connect(self, v):
if v is not self and v not in self.nn:
self.nn.add(v)
v.nn.add(self)
if self.minimiser():
v._min = False
v.check_min = False
# TEMPORARY
self.check_min = True
v.check_min = True
def disconnect(self, v):
if v in self.nn:
self.nn.remove(v)
v.nn.remove(self)
self.check_min = True
v.check_min = True
def minimiser(self):
"""Check whether this vertex is strictly less than all its neighbors"""
if self.check_min:
self._min = all(self.f < v.f for v in self.nn)
self.check_min = False
return self._min
def print_out(self):
print("Vertex: {}".format(self.x))
constr = 'Connections: '
for vc in self.nn:
constr += '{} '.format(vc.x)
print(constr)
print('Order = {}'.format(self.order))
class VertexCache:
def __init__(self, func, func_args=(), bounds=None, g_cons=None,
g_cons_args=(), indexed=True):
self.cache = {}
self.func = func
self.g_cons = g_cons
self.g_cons_args = g_cons_args
self.func_args = func_args
self.bounds = bounds
self.nfev = 0
self.size = 0
if indexed:
self.index = -1
def __getitem__(self, x, indexed=True):
try:
return self.cache[x]
except KeyError:
if indexed:
self.index += 1
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args,
index=self.index)
else:
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args)
# logging.info("New generated vertex at x = {}".format(x))
# NOTE: Surprisingly high performance increase if logging is commented out
self.cache[x] = xval
# TODO: Check
if self.func is not None:
if self.g_cons is not None:
if xval.feasible:
self.nfev += 1
self.size += 1
else:
self.size += 1
else:
self.nfev += 1
self.size += 1
return self.cache[x]
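# Illustrative usage sketch (not part of the original module):
#   V = VertexCache(func=lambda x: sum(xi ** 2 for xi in x))
#   v1 = V[(0.0, 1.0)]   # first access creates, evaluates and caches the vertex
#   v2 = V[(0.0, 1.0)]   # second access is a pure cache hit (nfev unchanged)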
| bsd-3-clause |
ar7z1/ansible | test/units/modules/network/f5/test_bigip_device_trust.py | 8 | 5969 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_device_trust import Parameters
from library.modules.bigip_device_trust import ModuleManager
from library.modules.bigip_device_trust import ArgumentSpec
from library.modules.bigip_device_trust import HAS_F5SDK
from library.modules.bigip_device_trust import HAS_NETADDR
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_device_trust import Parameters
from ansible.modules.network.f5.bigip_device_trust import ModuleManager
from ansible.modules.network.f5.bigip_device_trust import ArgumentSpec
from ansible.modules.network.f5.bigip_device_trust import HAS_F5SDK
from ansible.modules.network.f5.bigip_device_trust import HAS_NETADDR
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
from ansible.modules.network.f5.bigip_device_trust import HAS_NETADDR
if not HAS_NETADDR:
raise SkipTest("F5 Ansible modules require the netaddr Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
peer_server='10.10.10.10',
peer_hostname='foo.bar.baz',
peer_user='admin',
peer_password='secret'
)
p = Parameters(params=args)
assert p.peer_server == '10.10.10.10'
assert p.peer_hostname == 'foo.bar.baz'
assert p.peer_user == 'admin'
assert p.peer_password == 'secret'
def test_module_parameters_with_peer_type(self):
args = dict(
peer_server='10.10.10.10',
peer_hostname='foo.bar.baz',
peer_user='admin',
peer_password='secret',
type='peer'
)
p = Parameters(params=args)
assert p.peer_server == '10.10.10.10'
assert p.peer_hostname == 'foo.bar.baz'
assert p.peer_user == 'admin'
assert p.peer_password == 'secret'
assert p.type is True
def test_module_parameters_with_subordinate_type(self):
args = dict(
peer_server='10.10.10.10',
peer_hostname='foo.bar.baz',
peer_user='admin',
peer_password='secret',
type='subordinate'
)
p = Parameters(params=args)
assert p.peer_server == '10.10.10.10'
assert p.peer_hostname == 'foo.bar.baz'
assert p.peer_user == 'admin'
assert p.peer_password == 'secret'
assert p.type is False
def test_hyphenated_peer_hostname(self):
args = dict(
peer_hostname='hn---hyphen____underscore.hmatsuda.local',
)
p = Parameters(params=args)
assert p.peer_hostname == 'hn---hyphen____underscore.hmatsuda.local'
def test_numbered_peer_hostname(self):
args = dict(
peer_hostname='BIG-IP_12x_ans2.example.local',
)
p = Parameters(params=args)
assert p.peer_hostname == 'BIG-IP_12x_ans2.example.local'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_device_trust(self, *args):
set_module_args(dict(
peer_server='10.10.10.10',
peer_hostname='foo.bar.baz',
peer_user='admin',
peer_password='secret',
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_create_device_trust_idempotent(self, *args):
set_module_args(dict(
peer_server='10.10.10.10',
peer_hostname='foo.bar.baz',
peer_user='admin',
peer_password='secret',
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is False
| gpl-3.0 |
ferabra/edx-platform | lms/djangoapps/commerce/api/v0/views.py | 39 | 7242 | """ API v0 views. """
import logging
from ecommerce_api_client import exceptions
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.status import HTTP_406_NOT_ACCEPTABLE, HTTP_409_CONFLICT
from rest_framework.views import APIView
from commerce import ecommerce_api_client
from commerce.constants import Messages
from commerce.exceptions import InvalidResponseError
from commerce.http import DetailResponse, InternalRequestErrorResponse
from commerce.utils import audit_log
from course_modes.models import CourseMode
from courseware import courses
from embargo import api as embargo_api
from enrollment.api import add_enrollment
from enrollment.views import EnrollmentCrossDomainSessionAuth
from openedx.core.djangoapps.user_api.preferences.api import update_email_opt_in
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
from student.models import CourseEnrollment
from util.json_request import JsonResponse
log = logging.getLogger(__name__)
class BasketsView(APIView):
""" Creates a basket with a course seat and enrolls users. """
# LMS utilizes User.user_is_active to indicate email verification, not whether an account is active. Sigh!
authentication_classes = (EnrollmentCrossDomainSessionAuth, OAuth2AuthenticationAllowInactiveUser)
permission_classes = (IsAuthenticated,)
def _is_data_valid(self, request):
"""
Validates the data posted to the view.
Arguments
request -- HTTP request
Returns
Tuple (data_is_valid, course_key, error_msg)
"""
course_id = request.DATA.get('course_id')
if not course_id:
return False, None, u'Field course_id is missing.'
try:
course_key = CourseKey.from_string(course_id)
courses.get_course(course_key)
        except (InvalidKeyError, ValueError) as ex:
log.exception(u'Unable to locate course matching %s.', course_id)
return False, None, ex.message
return True, course_key, None
def _enroll(self, course_key, user):
""" Enroll the user in the course. """
add_enrollment(user.username, unicode(course_key))
def _handle_marketing_opt_in(self, request, course_key, user):
"""
Handle the marketing email opt-in flag, if it was set.
Errors here aren't expected, but should not break the outer enrollment transaction.
"""
email_opt_in = request.DATA.get('email_opt_in', None)
if email_opt_in is not None:
try:
update_email_opt_in(user, course_key.org, email_opt_in)
except Exception: # pylint: disable=broad-except
# log the error, return silently
log.exception(
'Failed to handle marketing opt-in flag: user="%s", course="%s"', user.username, course_key
)
def post(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
Attempt to create the basket and enroll the user.
"""
user = request.user
valid, course_key, error = self._is_data_valid(request)
if not valid:
return DetailResponse(error, status=HTTP_406_NOT_ACCEPTABLE)
embargo_response = embargo_api.get_embargo_response(request, course_key, user)
if embargo_response:
return embargo_response
# Don't do anything if an enrollment already exists
course_id = unicode(course_key)
enrollment = CourseEnrollment.get_enrollment(user, course_key)
if enrollment and enrollment.is_active:
msg = Messages.ENROLLMENT_EXISTS.format(course_id=course_id, username=user.username)
return DetailResponse(msg, status=HTTP_409_CONFLICT)
        # If there is no honor course mode, this is most likely a Prof-Ed course. Return an error so that the JS
# redirects to track selection.
honor_mode = CourseMode.mode_for_course(course_key, CourseMode.HONOR)
if not honor_mode:
msg = Messages.NO_HONOR_MODE.format(course_id=course_id)
return DetailResponse(msg, status=HTTP_406_NOT_ACCEPTABLE)
elif not honor_mode.sku:
# If there are no course modes with SKUs, enroll the user without contacting the external API.
msg = Messages.NO_SKU_ENROLLED.format(enrollment_mode=CourseMode.HONOR, course_id=course_id,
username=user.username)
log.info(msg)
self._enroll(course_key, user)
self._handle_marketing_opt_in(request, course_key, user)
return DetailResponse(msg)
# Setup the API
try:
api = ecommerce_api_client(user)
except ValueError:
self._enroll(course_key, user)
msg = Messages.NO_ECOM_API.format(username=user.username, course_id=unicode(course_key))
log.debug(msg)
return DetailResponse(msg)
response = None
# Make the API call
try:
response_data = api.baskets.post({
'products': [{'sku': honor_mode.sku}],
'checkout': True,
})
payment_data = response_data["payment_data"]
if payment_data:
# Pass data to the client to begin the payment flow.
response = JsonResponse(payment_data)
elif response_data['order']:
# The order was completed immediately because there is no charge.
msg = Messages.ORDER_COMPLETED.format(order_number=response_data['order']['number'])
log.debug(msg)
response = DetailResponse(msg)
else:
msg = u'Unexpected response from basket endpoint.'
log.error(
msg + u' Could not enroll user %(username)s in course %(course_id)s.',
{'username': user.id, 'course_id': course_id},
)
raise InvalidResponseError(msg)
except (exceptions.SlumberBaseException, exceptions.Timeout) as ex:
log.exception(ex.message)
return InternalRequestErrorResponse(ex.message)
finally:
audit_log(
'checkout_requested',
course_id=course_id,
mode=honor_mode.slug,
processor_name=None,
user_id=user.id
)
self._handle_marketing_opt_in(request, course_key, user)
return response
class BasketOrderView(APIView):
""" Retrieve the order associated with a basket. """
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated,)
def get(self, request, *_args, **kwargs):
""" HTTP handler. """
try:
order = ecommerce_api_client(request.user).baskets(kwargs['basket_id']).order.get()
return JsonResponse(order)
except exceptions.HttpNotFoundError:
return JsonResponse(status=404)
| agpl-3.0 |
Jgarcia-IAS/localizacion | openerp/addons-extra/base_state_ubication/__init__.py | 11 | 1488 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2012 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
#    programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
#    guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import res_state
import res_partner
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
athompso/ansible | v1/tests/TestVaultEditor.py | 118 | 5729 | #!/usr/bin/env python
from unittest import TestCase
import getpass
import os
import shutil
import time
import tempfile
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible import errors
from ansible.utils.vault import VaultLib
from ansible.utils.vault import VaultEditor
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
class TestVaultEditor(TestCase):
def _is_fips(self):
try:
data = open('/proc/sys/crypto/fips_enabled').read().strip()
except:
return False
if data != '1':
return False
return True
def test_methods_exist(self):
v = VaultEditor(None, None, None)
slots = ['create_file',
'decrypt_file',
'edit_file',
'encrypt_file',
'rekey_file',
'read_data',
'write_data',
'shuffle_files']
for slot in slots:
assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
def test_decrypt_1_0(self):
if self._is_fips():
raise SkipTest('Vault-1.0 will not function on FIPS enabled systems')
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.0.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file()
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
assert error_hit == False, "error decrypting 1.0 file"
assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def test_decrypt_1_1_newline(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.1-ansible-newline-ansible.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible\nansible\n", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file()
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
assert error_hit == False, "error decrypting 1.1 file with newline in password"
#assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
def test_decrypt_1_1(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.1.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file()
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
assert error_hit == False, "error decrypting 1.1 file"
assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
def test_rekey_migration(self):
if self._is_fips():
raise SkipTest('Vault-1.0 will not function on FIPS enabled systems')
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.0.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.rekey_file('ansible2')
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
assert error_hit == False, "error rekeying 1.0 file to 1.1"
# ensure filedata can be decrypted, is 1.1 and is AES256
vl = VaultLib("ansible2")
dec_data = None
error_hit = False
try:
dec_data = vl.decrypt(fdata)
except errors.AnsibleError, e:
error_hit = True
assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name
assert error_hit == False, "error decrypting migrated 1.0 file"
assert dec_data.strip() == "foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data
| gpl-3.0 |
nichung/wwwflaskBlogrevA | env/lib/python2.7/site-packages/pip/vendor/html5lib/serializer/htmlserializer.py | 79 | 12467 | from __future__ import absolute_import, division, unicode_literals
from pip.vendor.six import text_type
import gettext
_ = gettext.gettext
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
spaceCharacters = "".join(spaceCharacters)
try:
from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
unicode_encode_errors = "strict"
else:
unicode_encode_errors = "htmlentityreplace"
encode_entity_map = {}
is_ucs4 = len("\U0010FFFF") == 1
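# On wide (UCS-4) Python builds a non-BMP character is a single code unit, so
# the check above is True; on narrow builds it is a surrogate pair of length 2.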
for k, v in list(entities.items()):
# skip multi-character entities
if ((is_ucs4 and len(v) > 1) or
(not is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if not v in encode_entity_map or k.islower():
# prefer < over < and similarly for &, >, etc.
encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
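# Walk the unencodable slice one unit at a time; on narrow builds a surrogate
# pair is combined into one codepoint and its second half is skipped.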
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error(unicode_encode_errors, htmlentityreplace_errors)
del register_error
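# The handler registered above is selected whenever a string is encoded with
# errors=unicode_encode_errors, as HTMLSerializer.encode() does below.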
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"minimize_boolean_attributes", "use_trailing_solidus",
"space_before_trailing_solidus", "omit_optional_tags",
"strip_whitespace", "inject_meta_charset", "escape_lt_in_attrs",
"escape_rcdata", "resolve_entities", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
Whether to insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
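# A minimal usage sketch (assuming the html5lib treewalkers API, which is not
# shown in this module): build a token stream from a parsed tree and render it,
#   walker = treewalkers.getTreeWalker("etree")
#   html = HTMLSerializer(omit_optional_tags=False).render(walker(tree))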
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from ..filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# XXX: WhitespaceFilter should be used before OptionalTagFilter
# for maximum efficiency of this latter filter
if self.strip_whitespace:
from ..filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from ..filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from ..filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError(_("System identifer contains both single and double quote characters"))
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError(_("Unexpected </ in CDATA"))
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
for (attr_namespace, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple())
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x, y: x or (y in v),
spaceCharacters + ">\"'=", False)
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError(_("Comment contains --"))
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if not key in entities:
self.serializeError(_("Entity %s not recognized" % name))
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
def SerializeError(Exception):
"""Error in serialized tree"""
pass
| mit |
WebSpider/SickRage | lib/ndg/httpsclient/test/test_utils.py | 57 | 2092 | """unit tests module for ndg.httpsclient.utils module
PyOpenSSL utility to make a httplib-like interface suitable for use with
urllib2
"""
__author__ = "P J Kershaw (STFC)"
__date__ = "06/01/12"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = '$Id$'
import unittest
import os
from OpenSSL import SSL
from ndg.httpsclient.test import Constants
from ndg.httpsclient.utils import (Configuration, fetch_from_url, open_url,
_should_use_proxy)
class TestUtilsModule(unittest.TestCase):
'''Test ndg.httpsclient.utils module'''
def test01_configuration(self):
config = Configuration(SSL.Context(SSL.SSLv3_METHOD), True)
self.assert_(config.ssl_context)
self.assertEquals(config.debug, True)
def test02_fetch_from_url(self):
config = Configuration(SSL.Context(SSL.SSLv3_METHOD), True)
res = fetch_from_url(Constants.TEST_URI, config)
self.assert_(res)
def test03_open_url(self):
config = Configuration(SSL.Context(SSL.SSLv3_METHOD), True)
res = open_url(Constants.TEST_URI, config)
self.assertEqual(res[0], 200,
'open_url for %r failed' % Constants.TEST_URI)
def test04__should_use_proxy(self):
if 'no_proxy' in os.environ:
no_proxy = os.environ['no_proxy']
del os.environ['no_proxy']
else:
no_proxy = None
self.assertTrue(_should_use_proxy(Constants.TEST_URI),
'Expecting use proxy = True')
os.environ['no_proxy'] = 'localhost,localhost.localdomain'
self.assertFalse(_should_use_proxy(Constants.TEST_URI),
'Expecting use proxy = False')
if no_proxy is not None:
os.environ['no_proxy'] = no_proxy
else:
del os.environ['no_proxy']
if __name__ == "__main__":
unittest.main() | gpl-3.0 |
slav/reactJsNetExamples | MvcReact/MvcReact/node_modules/npm-check-updates/node_modules/npm/node_modules/node-gyp/gyp/gyptest.py | 525 | 7988 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner:
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
elif type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
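# 'cd' is special-cased because running it in a subprocess would only change
# the child's working directory; translate it into an in-process os.chdir().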
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
class Unbuffered:
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
| mit |
JingJunYin/tensorflow | tensorflow/python/tools/optimize_for_inference_lib.py | 49 | 18326 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Removes parts of a graph that are only needed for training.
There are several common transformations that can be applied to GraphDefs
created to train a model, that help reduce the amount of computation needed when
the network is used only for inference. These include:
- Removing training-only operations like checkpoint saving.
- Stripping out parts of the graph that are never reached.
- Removing debug operations like CheckNumerics.
- Folding batch normalization ops into the pre-calculated weights.
- Fusing common operations into unified versions.
This script takes a frozen GraphDef file (where the weight variables have been
converted into constants by the freeze_graph script) and outputs a new GraphDef
with the optimizations applied.
An example of command-line usage is:
bazel build tensorflow/python/tools:optimize_for_inference && \
bazel-bin/tensorflow/python/tools/optimize_for_inference \
--input_graph=some_graph_def.pb \
--output_graph=/tmp/optimized_graph.pb \
--input_names=Mul \
--output_names=softmax
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import re
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import flags as flags_lib
from tensorflow.python.platform import tf_logging
from tensorflow.python.tools import strip_unused_lib
flags = flags_lib
FLAGS = flags.FLAGS
# Support folding two types of batch norm ops:
# BatchNormWithGlobalNormalization and FusedBatchNorm. The two types only
# differ in input order and attribute names, so we've collected their
# differences up front.
INPUT_ORDER = {
# Order of inputs for BatchNormWithGlobalNormalization.
"BatchNormWithGlobalNormalization": [
"conv_op", "mean_op", "var_op", "beta_op", "gamma_op"
],
# Order of inputs for FusedBatchNorm.
"FusedBatchNorm": ["conv_op", "gamma_op", "beta_op", "mean_op", "var_op"]
}
# Name of the attribute epsilon value is stored in.
EPSILON_ATTR = {
"BatchNormWithGlobalNormalization": "variance_epsilon",
"FusedBatchNorm": "epsilon"
}
def optimize_for_inference(input_graph_def, input_node_names, output_node_names,
placeholder_type_enum):
"""Applies a series of inference optimizations on the input graph.
Args:
input_graph_def: A GraphDef containing a training model.
input_node_names: A list of names of the nodes that are fed inputs during
inference.
output_node_names: A list of names of the nodes that produce the final
results.
placeholder_type_enum: The AttrValue enum for the placeholder data type, or
a list that specifies one value per input node name.
Returns:
An optimized version of the input graph.
"""
ensure_graph_is_valid(input_graph_def)
optimized_graph_def = input_graph_def
optimized_graph_def = strip_unused_lib.strip_unused(
optimized_graph_def, input_node_names, output_node_names,
placeholder_type_enum)
optimized_graph_def = graph_util.remove_training_nodes(
optimized_graph_def, output_node_names)
optimized_graph_def = fold_batch_norms(optimized_graph_def)
optimized_graph_def = fuse_resize_and_conv(optimized_graph_def,
output_node_names)
ensure_graph_is_valid(optimized_graph_def)
return optimized_graph_def
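# A minimal Python usage sketch (input/output node names and the file name are
# taken from the module docstring above and are illustrative only):
#   graph_def = graph_pb2.GraphDef()
#   with open("some_graph_def.pb", "rb") as f:
#     graph_def.ParseFromString(f.read())
#   out = optimize_for_inference(graph_def, ["Mul"], ["softmax"],
#                                dtypes.float32.as_datatype_enum)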
def ensure_graph_is_valid(graph_def):
"""Makes sure that the graph is internally consistent.
Checks basic properties of the graph def and raises an exception if there are
input references to missing nodes, duplicated names, or other logic errors.
Args:
graph_def: Definition of a graph to be checked.
Raises:
ValueError: If the graph is incorrectly constructed.
"""
node_map = {}
for node in graph_def.node:
if node.name not in node_map.keys():
node_map[node.name] = node
else:
raise ValueError("Duplicate node names detected for ", node.name)
for node in graph_def.node:
for input_name in node.input:
input_node_name = node_name_from_input(input_name)
if input_node_name not in node_map.keys():
raise ValueError("Input for ", node.name, " not found: ", input_name)
def node_name_from_input(node_name):
"""Strips off ports and other decorations to get the underlying node name."""
if node_name.startswith("^"):
node_name = node_name[1:]
m = re.search(r"(.*):\d+$", node_name)
if m:
node_name = m.group(1)
return node_name
def node_from_map(node_map, name):
"""Pulls a node def from a dictionary for a given name.
Args:
node_map: Dictionary containing an entry indexed by name for every node.
name: Identifies the node we want to find.
Returns:
NodeDef of the node with the given name.
Raises:
ValueError: If the node isn't present in the dictionary.
"""
stripped_name = node_name_from_input(name)
if stripped_name not in node_map:
raise ValueError("No node named '%s' found in map." % name)
return node_map[stripped_name]
def values_from_const(node_def):
"""Extracts the values from a const NodeDef as a numpy ndarray.
Args:
node_def: Const NodeDef that has the values we want to access.
Returns:
Numpy ndarray containing the values.
Raises:
ValueError: If the node isn't a Const.
"""
if node_def.op != "Const":
raise ValueError(
"Node named '%s' should be a Const op for values_from_const." %
node_def.name)
input_tensor = node_def.attr["value"].tensor
tensor_value = tensor_util.MakeNdarray(input_tensor)
return tensor_value
# Whether to scale by gamma after normalization.
def scale_after_normalization(node):
if node.op == "BatchNormWithGlobalNormalization":
return node.attr["scale_after_normalization"].b
return True
def fold_batch_norms(input_graph_def):
"""Removes batch normalization ops by folding them into convolutions.
Batch normalization during training has multiple dynamic parameters that are
updated, but once the graph is finalized these become constants. That means
there's an opportunity to reduce the computations down to a scale and
addition, rather than the more expensive multiple ops, and even bake the
scaling into the convolution weights. This function identifies the typical
pattern of batch normalization subgraphs, and performs the transformation to
fold the computations down into a simpler form. It currently only spots batch
normalization that's performed by the BatchNormWithGlobalNormalization op, and
will need to be extended in the future to handle the newer style.
Args:
input_graph_def: A GraphDef containing a model.
Returns:
Modified graph with BN ops removed, and modified weights.
Raises:
ValueError: If the graph is badly formed with duplicate node names.
"""
input_node_map = {}
for node in input_graph_def.node:
if node.name not in input_node_map.keys():
input_node_map[node.name] = node
else:
raise ValueError("Duplicate node names detected for ", node.name)
nodes_to_skip = {}
new_ops = []
for node in input_graph_def.node:
if node.op not in ("BatchNormWithGlobalNormalization", "FusedBatchNorm"):
continue
conv_op = node_from_map(input_node_map,
node.input[INPUT_ORDER[node.op].index("conv_op")])
if conv_op.op != "Conv2D":
tf_logging.warning(
"Didn't find expected Conv2D input to '%s'" % node.name)
continue
weights_op = node_from_map(input_node_map, conv_op.input[1])
if weights_op.op != "Const":
tf_logging.warning("Didn't find expected conv Constant input to '%s',"
" found %s instead. Maybe because freeze_graph wasn't"
" run first?" % (conv_op.name, weights_op))
continue
weights = values_from_const(weights_op)
channel_count = weights.shape[3]
mean_op = node_from_map(input_node_map,
node.input[INPUT_ORDER[node.op].index("mean_op")])
if mean_op.op != "Const":
tf_logging.warning("Didn't find expected mean Constant input to '%s',"
" found %s instead. Maybe because freeze_graph wasn't"
" run first?" % (node.name, mean_op))
continue
mean_value = values_from_const(mean_op)
if mean_value.shape != (channel_count,):
tf_logging.warning("Incorrect shape for mean, found %s, expected %s,"
" for node %s" % (str(mean_value.shape), str(
(channel_count,)), node.name))
continue
var_op = node_from_map(input_node_map,
node.input[INPUT_ORDER[node.op].index("var_op")])
if var_op.op != "Const":
tf_logging.warning("Didn't find expected var Constant input to '%s',"
" found %s instead. Maybe because freeze_graph wasn't"
" run first?" % (node.name, var_op))
continue
var_value = values_from_const(var_op)
if var_value.shape != (channel_count,):
tf_logging.warning("Incorrect shape for var, found %s, expected %s,"
" for node %s" % (str(var_value.shape), str(
(channel_count,)), node.name))
continue
beta_op = node_from_map(input_node_map,
node.input[INPUT_ORDER[node.op].index("beta_op")])
if beta_op.op != "Const":
tf_logging.warning("Didn't find expected beta Constant input to '%s',"
" found %s instead. Maybe because freeze_graph wasn't"
" run first?" % (node.name, beta_op))
continue
beta_value = values_from_const(beta_op)
if beta_value.shape != (channel_count,):
tf_logging.warning("Incorrect shape for beta, found %s, expected %s,"
" for node %s" % (str(beta_value.shape), str(
(channel_count,)), node.name))
continue
gamma_op = node_from_map(input_node_map,
node.input[INPUT_ORDER[node.op].index("gamma_op")])
if gamma_op.op != "Const":
tf_logging.warning("Didn't find expected gamma Constant input to '%s',"
" found %s instead. Maybe because freeze_graph wasn't"
" run first?" % (node.name, gamma_op))
continue
gamma_value = values_from_const(gamma_op)
if gamma_value.shape != (channel_count,):
tf_logging.warning("Incorrect shape for gamma, found %s, expected %s,"
" for node %s" % (str(gamma_value.shape), str(
(channel_count,)), node.name))
continue
variance_epsilon_value = node.attr[EPSILON_ATTR[node.op]].f
nodes_to_skip[node.name] = True
nodes_to_skip[weights_op.name] = True
nodes_to_skip[mean_op.name] = True
nodes_to_skip[var_op.name] = True
nodes_to_skip[beta_op.name] = True
nodes_to_skip[gamma_op.name] = True
nodes_to_skip[conv_op.name] = True
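# Fold the normalization into a single scale and offset:
#   scale = gamma / sqrt(var + epsilon)   (gamma omitted when scaling is off)
#   offset = beta - mean * scale
# so that conv(x, weights * scale) + offset matches the original conv + BN.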
if scale_after_normalization(node):
scale_value = (
(1.0 / np.vectorize(math.sqrt)(var_value + variance_epsilon_value)) *
gamma_value)
else:
scale_value = (
1.0 / np.vectorize(math.sqrt)(var_value + variance_epsilon_value))
offset_value = (-mean_value * scale_value) + beta_value
scaled_weights = np.copy(weights)
it = np.nditer(
scaled_weights, flags=["multi_index"], op_flags=["readwrite"])
while not it.finished:
current_scale = scale_value[it.multi_index[3]]
it[0] *= current_scale
it.iternext()
scaled_weights_op = node_def_pb2.NodeDef()
scaled_weights_op.op = "Const"
scaled_weights_op.name = weights_op.name
scaled_weights_op.attr["dtype"].CopyFrom(weights_op.attr["dtype"])
scaled_weights_op.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
scaled_weights, weights.dtype.type, weights.shape)))
new_conv_op = node_def_pb2.NodeDef()
new_conv_op.CopyFrom(conv_op)
offset_op = node_def_pb2.NodeDef()
offset_op.op = "Const"
offset_op.name = conv_op.name + "_bn_offset"
offset_op.attr["dtype"].CopyFrom(mean_op.attr["dtype"])
offset_op.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
offset_value, mean_value.dtype.type, offset_value.shape)))
bias_add_op = node_def_pb2.NodeDef()
bias_add_op.op = "BiasAdd"
bias_add_op.name = node.name
bias_add_op.attr["T"].CopyFrom(conv_op.attr["T"])
bias_add_op.input.extend([new_conv_op.name, offset_op.name])
new_ops.extend([scaled_weights_op, new_conv_op, offset_op, bias_add_op])
result_graph_def = graph_pb2.GraphDef()
for node in input_graph_def.node:
if node.name in nodes_to_skip:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
result_graph_def.node.extend([new_node])
result_graph_def.node.extend(new_ops)
return result_graph_def
def fuse_resize_and_conv(input_graph_def, output_node_names):
"""Merges preceding resize and mirror pad ops into a specialized convolution.
There's a common pattern of enlarging the input to a convolution using a
resize operation, and also using MirrorPad to extend the boundaries so that
zero edge pixels don't bleed inwards when convolving. This routine looks for
that pattern of operations, and fuses them together into a Conv2DWithResizeOp.
Args:
input_graph_def: A GraphDef containing a model.
output_node_names: A list of names of the nodes that produce the final
results.
Returns:
Modified graph with resize and pad ops merged.
Raises:
ValueError: If the graph is badly formed with duplicate node names.
"""
input_node_map = {}
for node in input_graph_def.node:
if node.name not in input_node_map.keys():
input_node_map[node.name] = node
else:
raise ValueError("Duplicate node names detected for ", node.name)
node_reference_count = collections.defaultdict(int)
for node in input_graph_def.node:
for input_name in node.input:
stripped_name = node_name_from_input(input_name)
node_reference_count[stripped_name] += 1
for output_name in output_node_names:
node_reference_count[output_name] += 1
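# Each count is the number of consumers of a node, plus one for graph outputs;
# nodes whose count reaches zero after fusing are dropped from the result.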
new_ops = []
for node in input_graph_def.node:
if node.op != "Conv2D":
continue
conv_op = node
input_op = node_from_map(input_node_map, conv_op.input[0])
if input_op.op == "MirrorPad":
mirror_pad_op = input_op
resize_op = node_from_map(input_node_map, mirror_pad_op.input[0])
if resize_op.op != "ResizeBilinear":
resize_op = None
else:
mirror_pad_op = None
if input_op.op == "ResizeBilinear":
resize_op = input_op
else:
resize_op = None
# There are no ops to be fused into the conv, so skip replacing this one.
if not mirror_pad_op and not resize_op:
continue
# We're replacing this node, so make sure the old one is removed.
node_reference_count[conv_op.name] = 0
if mirror_pad_op:
node_reference_count[mirror_pad_op.name] -= 1
if resize_op:
node_reference_count[resize_op.name] -= 1
fused_conv_op = node_def_pb2.NodeDef()
if resize_op:
fused_conv_op.op = "FusedResizeAndPadConv2D"
else:
fused_conv_op.op = "FusedPadConv2D"
fused_conv_op.name = conv_op.name
if mirror_pad_op:
mirror_paddings_name = mirror_pad_op.input[1]
mirror_paddings_mode = mirror_pad_op.attr["mode"]
else:
# If there was no MirrorPad op, then create settings that make the padding
# stage of the fused operation a no-op.
paddings_op = node_def_pb2.NodeDef()
paddings_op.op = "Const"
paddings_op.name = conv_op.name + "_dummy_paddings"
paddings_op.attr["dtype"].CopyFrom(
attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum))
paddings_op.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
[0, 0, 0, 0, 0, 0, 0, 0], dtypes.int32, [4, 2])))
new_ops.extend([paddings_op])
mirror_paddings_name = paddings_op.name
mirror_paddings_mode = attr_value_pb2.AttrValue(s=b"REFLECT")
if resize_op:
fused_conv_op.input.extend([
resize_op.input[0], resize_op.input[1], mirror_paddings_name,
conv_op.input[1]
])
fused_conv_op.attr["resize_align_corners"].CopyFrom(
resize_op.attr["align_corners"])
else:
fused_conv_op.input.extend(
[mirror_pad_op.input[0], mirror_paddings_name, conv_op.input[1]])
fused_conv_op.attr["T"].CopyFrom(conv_op.attr["T"])
fused_conv_op.attr["mode"].CopyFrom(mirror_paddings_mode)
fused_conv_op.attr["strides"].CopyFrom(conv_op.attr["strides"])
fused_conv_op.attr["padding"].CopyFrom(conv_op.attr["padding"])
new_ops.extend([fused_conv_op])
result_graph_def = graph_pb2.GraphDef()
for node in input_graph_def.node:
if node_reference_count[node.name] < 1:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
result_graph_def.node.extend([new_node])
result_graph_def.node.extend(new_ops)
return result_graph_def
| apache-2.0 |
mancoast/CPythonPyc_test | crash/276_test_urllib2.py | 45 | 54239 | import unittest
from test import test_support
import os
import socket
import StringIO
import urllib2
from urllib2 import Request, OpenerDirector
# XXX
# Request
# CacheFTPHandler (hard to write)
# parse_keqv_list, parse_http_list, HTTPDigestAuthHandler
class TrivialTests(unittest.TestCase):
def test_trivial(self):
# A couple trivial tests
self.assertRaises(ValueError, urllib2.urlopen, 'bogus url')
# XXX Name hacking to get this to work on Windows.
fname = os.path.abspath(urllib2.__file__).replace('\\', '/')
# And more hacking to get it to work on MacOS. This assumes
# urllib.pathname2url works, unfortunately...
if os.name == 'riscos':
import string
fname = os.expand(fname)
fname = fname.translate(string.maketrans("/.", "./"))
if os.name == 'nt':
file_url = "file:///%s" % fname
else:
file_url = "file://%s" % fname
f = urllib2.urlopen(file_url)
buf = f.read()
f.close()
def test_parse_http_list(self):
tests = [('a,b,c', ['a', 'b', 'c']),
('path"o,l"og"i"cal, example', ['path"o,l"og"i"cal', 'example']),
('a, b, "c", "d", "e,f", g, h', ['a', 'b', '"c"', '"d"', '"e,f"', 'g', 'h']),
('a="b\\"c", d="e\\,f", g="h\\\\i"', ['a="b"c"', 'd="e,f"', 'g="h\\i"'])]
for string, list in tests:
self.assertEqual(urllib2.parse_http_list(string), list)
def test_request_headers_dict():
"""
The Request.headers dictionary is not a documented interface. It should
stay that way, because the complete set of headers are only accessible
through the .get_header(), .has_header(), .header_items() interface.
However, .headers pre-dates those methods, and so real code will be using
the dictionary.
The introduction in 2.4 of those methods was a mistake for the same reason:
code that previously saw all (urllib2 user)-provided headers in .headers
now sees only a subset (and the function interface is ugly and incomplete).
A better change would have been to replace .headers dict with a dict
subclass (or UserDict.DictMixin instance?) that preserved the .headers
interface and also provided access to the "unredirected" headers. It's
probably too late to fix that, though.
Check .capitalize() case normalization:
>>> url = "http://example.com"
>>> Request(url, headers={"Spam-eggs": "blah"}).headers["Spam-eggs"]
'blah'
>>> Request(url, headers={"spam-EggS": "blah"}).headers["Spam-eggs"]
'blah'
Currently, Request(url, "Spam-eggs").headers["Spam-Eggs"] raises KeyError,
but that could be changed in future.
"""
def test_request_headers_methods():
"""
Note the case normalization of header names here, to .capitalize()-case.
This should be preserved for backwards-compatibility. (In the HTTP case,
normalization to .title()-case is done by urllib2 before sending headers to
httplib).
>>> url = "http://example.com"
>>> r = Request(url, headers={"Spam-eggs": "blah"})
>>> r.has_header("Spam-eggs")
True
>>> r.header_items()
[('Spam-eggs', 'blah')]
>>> r.add_header("Foo-Bar", "baz")
>>> items = r.header_items()
>>> items.sort()
>>> items
[('Foo-bar', 'baz'), ('Spam-eggs', 'blah')]
Note that e.g. r.has_header("spam-EggS") is currently False, and
r.get_header("spam-EggS") returns None, but that could be changed in
future.
>>> r.has_header("Not-there")
False
>>> print r.get_header("Not-there")
None
>>> r.get_header("Not-there", "default")
'default'
"""
def test_password_manager(self):
"""
>>> mgr = urllib2.HTTPPasswordMgr()
>>> add = mgr.add_password
>>> add("Some Realm", "http://example.com/", "joe", "password")
>>> add("Some Realm", "http://example.com/ni", "ni", "ni")
>>> add("c", "http://example.com/foo", "foo", "ni")
>>> add("c", "http://example.com/bar", "bar", "nini")
>>> add("b", "http://example.com/", "first", "blah")
>>> add("b", "http://example.com/", "second", "spam")
>>> add("a", "http://example.com", "1", "a")
>>> add("Some Realm", "http://c.example.com:3128", "3", "c")
>>> add("Some Realm", "d.example.com", "4", "d")
>>> add("Some Realm", "e.example.com:3128", "5", "e")
>>> mgr.find_user_password("Some Realm", "example.com")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com/")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com/spam")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com/spam/spam")
('joe', 'password')
>>> mgr.find_user_password("c", "http://example.com/foo")
('foo', 'ni')
>>> mgr.find_user_password("c", "http://example.com/bar")
('bar', 'nini')
Actually, this is really undefined ATM
## Currently, we use the highest-level path where more than one match:
## >>> mgr.find_user_password("Some Realm", "http://example.com/ni")
## ('joe', 'password')
Use latest add_password() in case of conflict:
>>> mgr.find_user_password("b", "http://example.com/")
('second', 'spam')
No special relationship between a.example.com and example.com:
>>> mgr.find_user_password("a", "http://example.com/")
('1', 'a')
>>> mgr.find_user_password("a", "http://a.example.com/")
(None, None)
Ports:
>>> mgr.find_user_password("Some Realm", "c.example.com")
(None, None)
>>> mgr.find_user_password("Some Realm", "c.example.com:3128")
('3', 'c')
>>> mgr.find_user_password("Some Realm", "http://c.example.com:3128")
('3', 'c')
>>> mgr.find_user_password("Some Realm", "d.example.com")
('4', 'd')
>>> mgr.find_user_password("Some Realm", "e.example.com:3128")
('5', 'e')
"""
pass
def test_password_manager_default_port(self):
"""
>>> mgr = urllib2.HTTPPasswordMgr()
>>> add = mgr.add_password
The point to note here is that we can't guess the default port if there's
no scheme. This applies to both add_password and find_user_password.
>>> add("f", "http://g.example.com:80", "10", "j")
>>> add("g", "http://h.example.com", "11", "k")
>>> add("h", "i.example.com:80", "12", "l")
>>> add("i", "j.example.com", "13", "m")
>>> mgr.find_user_password("f", "g.example.com:100")
(None, None)
>>> mgr.find_user_password("f", "g.example.com:80")
('10', 'j')
>>> mgr.find_user_password("f", "g.example.com")
(None, None)
>>> mgr.find_user_password("f", "http://g.example.com:100")
(None, None)
>>> mgr.find_user_password("f", "http://g.example.com:80")
('10', 'j')
>>> mgr.find_user_password("f", "http://g.example.com")
('10', 'j')
>>> mgr.find_user_password("g", "h.example.com")
('11', 'k')
>>> mgr.find_user_password("g", "h.example.com:80")
('11', 'k')
>>> mgr.find_user_password("g", "http://h.example.com:80")
('11', 'k')
>>> mgr.find_user_password("h", "i.example.com")
(None, None)
>>> mgr.find_user_password("h", "i.example.com:80")
('12', 'l')
>>> mgr.find_user_password("h", "http://i.example.com:80")
('12', 'l')
>>> mgr.find_user_password("i", "j.example.com")
('13', 'm')
>>> mgr.find_user_password("i", "j.example.com:80")
(None, None)
>>> mgr.find_user_password("i", "http://j.example.com")
('13', 'm')
>>> mgr.find_user_password("i", "http://j.example.com:80")
(None, None)
"""
class MockOpener:
addheaders = []
def open(self, req, data=None,timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.req, self.data, self.timeout = req, data, timeout
def error(self, proto, *args):
self.proto, self.args = proto, args
class MockFile:
def read(self, count=None): pass
def readline(self, count=None): pass
def close(self): pass
class MockHeaders(dict):
def getheaders(self, name):
return self.values()
class MockResponse(StringIO.StringIO):
def __init__(self, code, msg, headers, data, url=None):
StringIO.StringIO.__init__(self, data)
self.code, self.msg, self.headers, self.url = code, msg, headers, url
def info(self):
return self.headers
def geturl(self):
return self.url
class MockCookieJar:
def add_cookie_header(self, request):
self.ach_req = request
def extract_cookies(self, response, request):
self.ec_req, self.ec_r = request, response
class FakeMethod:
def __init__(self, meth_name, action, handle):
self.meth_name = meth_name
self.handle = handle
self.action = action
def __call__(self, *args):
return self.handle(self.meth_name, self.action, *args)
class MockHTTPResponse:
def __init__(self, fp, msg, status, reason):
self.fp = fp
self.msg = msg
self.status = status
self.reason = reason
def read(self):
return ''
class MockHTTPClass:
def __init__(self):
self.req_headers = []
self.data = None
self.raise_on_endheaders = False
self._tunnel_headers = {}
def __call__(self, host, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.timeout = timeout
return self
def set_debuglevel(self, level):
self.level = level
def set_tunnel(self, host, port=None, headers=None):
self._tunnel_host = host
self._tunnel_port = port
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
def request(self, method, url, body=None, headers=None):
self.method = method
self.selector = url
if headers is not None:
self.req_headers += headers.items()
self.req_headers.sort()
if body:
self.data = body
if self.raise_on_endheaders:
import socket
raise socket.error()
def getresponse(self):
return MockHTTPResponse(MockFile(), {}, 200, "OK")
def close(self):
pass
class MockHandler:
# useful for testing handler machinery
# see add_ordered_mock_handlers() docstring
handler_order = 500
def __init__(self, methods):
self._define_methods(methods)
def _define_methods(self, methods):
for spec in methods:
if len(spec) == 2: name, action = spec
else: name, action = spec, None
meth = FakeMethod(name, action, self.handle)
setattr(self.__class__, name, meth)
def handle(self, fn_name, action, *args, **kwds):
self.parent.calls.append((self, fn_name, args, kwds))
if action is None:
return None
elif action == "return self":
return self
elif action == "return response":
res = MockResponse(200, "OK", {}, "")
return res
elif action == "return request":
return Request("http://blah/")
elif action.startswith("error"):
code = action[action.rfind(" ")+1:]
try:
code = int(code)
except ValueError:
pass
res = MockResponse(200, "OK", {}, "")
return self.parent.error("http", args[0], res, code, "", {})
elif action == "raise":
raise urllib2.URLError("blah")
assert False
def close(self): pass
def add_parent(self, parent):
self.parent = parent
self.parent.calls = []
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# No handler_order, leave in original order. Yuck.
return True
return self.handler_order < other.handler_order
def add_ordered_mock_handlers(opener, meth_spec):
"""Create MockHandlers and add them to an OpenerDirector.
meth_spec: list of lists of tuples and strings defining methods to define
on handlers. eg:
[["http_error", "ftp_open"], ["http_open"]]
defines methods .http_error() and .ftp_open() on one handler, and
.http_open() on another. These methods just record their arguments and
return None. Using a tuple instead of a string causes the method to
perform some action (see MockHandler.handle()), eg:
[["http_error"], [("http_open", "return request")]]
defines .http_error() on one handler (which simply returns None), and
.http_open() on another handler, which returns a Request object.
"""
handlers = []
count = 0
for meths in meth_spec:
class MockHandlerSubclass(MockHandler): pass
h = MockHandlerSubclass(meths)
h.handler_order += count
h.add_parent(opener)
count = count + 1
handlers.append(h)
opener.add_handler(h)
return handlers
def build_test_opener(*handler_instances):
opener = OpenerDirector()
for h in handler_instances:
opener.add_handler(h)
return opener
class MockHTTPHandler(urllib2.BaseHandler):
# useful for testing redirections and auth
# sends supplied headers and code as first response
# sends 200 OK as second response
def __init__(self, code, headers):
self.code = code
self.headers = headers
self.reset()
def reset(self):
self._count = 0
self.requests = []
def http_open(self, req):
import mimetools, httplib, copy
from StringIO import StringIO
self.requests.append(copy.deepcopy(req))
if self._count == 0:
self._count = self._count + 1
name = httplib.responses[self.code]
msg = mimetools.Message(StringIO(self.headers))
return self.parent.error(
"http", req, MockFile(), self.code, name, msg)
else:
self.req = req
msg = mimetools.Message(StringIO("\r\n\r\n"))
return MockResponse(200, "OK", msg, "", req.get_full_url())
class MockHTTPSHandler(urllib2.AbstractHTTPHandler):
# Useful for testing the Proxy-Authorization request by verifying the
# properties of httpcon
def __init__(self):
urllib2.AbstractHTTPHandler.__init__(self)
self.httpconn = MockHTTPClass()
def https_open(self, req):
return self.do_open(self.httpconn, req)
class MockPasswordManager:
def add_password(self, realm, uri, user, password):
self.realm = realm
self.url = uri
self.user = user
self.password = password
def find_user_password(self, realm, authuri):
self.target_realm = realm
self.target_url = authuri
return self.user, self.password
class OpenerDirectorTests(unittest.TestCase):
def test_add_non_handler(self):
class NonHandler(object):
pass
self.assertRaises(TypeError,
OpenerDirector().add_handler, NonHandler())
def test_badly_named_methods(self):
# test work-around for three methods that accidentally follow the
# naming conventions for handler methods
# (*_open() / *_request() / *_response())
# These used to call the accidentally-named methods, causing a
# TypeError in real code; here, returning self from these mock
# methods would either cause no exception, or AttributeError.
from urllib2 import URLError
o = OpenerDirector()
meth_spec = [
[("do_open", "return self"), ("proxy_open", "return self")],
[("redirect_request", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
o.add_handler(urllib2.UnknownHandler())
for scheme in "do", "proxy", "redirect":
self.assertRaises(URLError, o.open, scheme+"://example.com/")
def test_handled(self):
# handler returning non-None means no more handlers will be called
o = OpenerDirector()
meth_spec = [
["http_open", "ftp_open", "http_error_302"],
["ftp_open"],
[("http_open", "return self")],
[("http_open", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
r = o.open(req)
# Second .http_open() gets called, third doesn't, since second returned
# non-None. Handlers without .http_open() never get any methods called
# on them.
# In fact, second mock handler defining .http_open() returns self
# (instead of response), which becomes the OpenerDirector's return
# value.
self.assertEqual(r, handlers[2])
calls = [(handlers[0], "http_open"), (handlers[2], "http_open")]
for expected, got in zip(calls, o.calls):
handler, name, args, kwds = got
self.assertEqual((handler, name), expected)
self.assertEqual(args, (req,))
def test_handler_order(self):
o = OpenerDirector()
handlers = []
for meths, handler_order in [
([("http_open", "return self")], 500),
(["http_open"], 0),
]:
class MockHandlerSubclass(MockHandler): pass
h = MockHandlerSubclass(meths)
h.handler_order = handler_order
handlers.append(h)
o.add_handler(h)
r = o.open("http://example.com/")
# handlers called in reverse order, thanks to their sort order
self.assertEqual(o.calls[0][0], handlers[1])
self.assertEqual(o.calls[1][0], handlers[0])
def test_raise(self):
# raising URLError stops processing of request
o = OpenerDirector()
meth_spec = [
[("http_open", "raise")],
[("http_open", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
self.assertRaises(urllib2.URLError, o.open, req)
self.assertEqual(o.calls, [(handlers[0], "http_open", (req,), {})])
## def test_error(self):
## # XXX this doesn't actually seem to be used in standard library,
## # but should really be tested anyway...
def test_http_error(self):
# XXX http_error_default
# http errors are a special case
o = OpenerDirector()
meth_spec = [
[("http_open", "error 302")],
[("http_error_400", "raise"), "http_open"],
[("http_error_302", "return response"), "http_error_303",
"http_error"],
[("http_error_302")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
class Unknown:
def __eq__(self, other): return True
req = Request("http://example.com/")
r = o.open(req)
assert len(o.calls) == 2
calls = [(handlers[0], "http_open", (req,)),
(handlers[2], "http_error_302",
(req, Unknown(), 302, "", {}))]
for expected, got in zip(calls, o.calls):
handler, method_name, args = expected
self.assertEqual((handler, method_name), got[:2])
self.assertEqual(args, got[2])
def test_processors(self):
# *_request / *_response methods get called appropriately
o = OpenerDirector()
meth_spec = [
[("http_request", "return request"),
("http_response", "return response")],
[("http_request", "return request"),
("http_response", "return response")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
r = o.open(req)
# processor methods are called on *all* handlers that define them,
# not just the first handler that handles the request
calls = [
(handlers[0], "http_request"), (handlers[1], "http_request"),
(handlers[0], "http_response"), (handlers[1], "http_response")]
for i, (handler, name, args, kwds) in enumerate(o.calls):
if i < 2:
# *_request
self.assertEqual((handler, name), calls[i])
self.assertEqual(len(args), 1)
self.assertIsInstance(args[0], Request)
else:
# *_response
self.assertEqual((handler, name), calls[i])
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], Request)
# response from opener.open is None, because there's no
# handler that defines http_open to handle it
self.assertTrue(args[1] is None or
isinstance(args[1], MockResponse))
def sanepathname2url(path):
import urllib
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class HandlerTests(unittest.TestCase):
def test_ftp(self):
class MockFTPWrapper:
def __init__(self, data): self.data = data
def retrfile(self, filename, filetype):
self.filename, self.filetype = filename, filetype
return StringIO.StringIO(self.data), len(self.data)
def close(self): pass
class NullFTPHandler(urllib2.FTPHandler):
def __init__(self, data): self.data = data
def connect_ftp(self, user, passwd, host, port, dirs,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.user, self.passwd = user, passwd
self.host, self.port = host, port
self.dirs = dirs
self.ftpwrapper = MockFTPWrapper(self.data)
return self.ftpwrapper
import ftplib
data = "rheum rhaponicum"
h = NullFTPHandler(data)
o = h.parent = MockOpener()
for url, host, port, user, passwd, type_, dirs, filename, mimetype in [
("ftp://localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://%25parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "%parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://%2542parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "%42parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://localhost:80/foo/bar/",
"localhost", 80, "", "", "D",
["foo", "bar"], "", None),
("ftp://localhost/baz.gif;type=a",
"localhost", ftplib.FTP_PORT, "", "", "A",
[], "baz.gif", None), # XXX really this should guess image/gif
]:
req = Request(url)
req.timeout = None
r = h.ftp_open(req)
# ftp authentication not yet implemented by FTPHandler
self.assertEqual(h.user, user)
self.assertEqual(h.passwd, passwd)
self.assertEqual(h.host, socket.gethostbyname(host))
self.assertEqual(h.port, port)
self.assertEqual(h.dirs, dirs)
self.assertEqual(h.ftpwrapper.filename, filename)
self.assertEqual(h.ftpwrapper.filetype, type_)
headers = r.info()
self.assertEqual(headers.get("Content-type"), mimetype)
self.assertEqual(int(headers["Content-length"]), len(data))
def test_file(self):
import rfc822, socket
h = urllib2.FileHandler()
o = h.parent = MockOpener()
TESTFN = test_support.TESTFN
urlpath = sanepathname2url(os.path.abspath(TESTFN))
towrite = "hello, world\n"
urls = [
"file://localhost%s" % urlpath,
"file://%s" % urlpath,
"file://%s%s" % (socket.gethostbyname('localhost'), urlpath),
]
try:
localaddr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
localaddr = ''
if localaddr:
urls.append("file://%s%s" % (localaddr, urlpath))
for url in urls:
f = open(TESTFN, "wb")
try:
try:
f.write(towrite)
finally:
f.close()
r = h.file_open(Request(url))
try:
data = r.read()
headers = r.info()
respurl = r.geturl()
finally:
r.close()
stats = os.stat(TESTFN)
modified = rfc822.formatdate(stats.st_mtime)
finally:
os.remove(TESTFN)
self.assertEqual(data, towrite)
self.assertEqual(headers["Content-type"], "text/plain")
self.assertEqual(headers["Content-length"], "13")
self.assertEqual(headers["Last-modified"], modified)
self.assertEqual(respurl, url)
for url in [
"file://localhost:80%s" % urlpath,
"file:///file_does_not_exist.txt",
"file://%s:80%s/%s" % (socket.gethostbyname('localhost'),
os.getcwd(), TESTFN),
"file://somerandomhost.ontheinternet.com%s/%s" %
(os.getcwd(), TESTFN),
]:
try:
f = open(TESTFN, "wb")
try:
f.write(towrite)
finally:
f.close()
self.assertRaises(urllib2.URLError,
h.file_open, Request(url))
finally:
os.remove(TESTFN)
h = urllib2.FileHandler()
o = h.parent = MockOpener()
# XXXX why does // mean ftp (and /// mean not ftp!), and where
# is file: scheme specified? I think this is really a bug, and
# what was intended was to distinguish between URLs like:
# file:/blah.txt (a file)
# file://localhost/blah.txt (a file)
# file:///blah.txt (a file)
# file://ftp.example.com/blah.txt (an ftp URL)
for url, ftp in [
("file://ftp.example.com//foo.txt", True),
("file://ftp.example.com///foo.txt", False),
# XXXX bug: fails with OSError, should be URLError
("file://ftp.example.com/foo.txt", False),
("file://somehost//foo/something.txt", True),
("file://localhost//foo/something.txt", False),
]:
req = Request(url)
try:
h.file_open(req)
# XXXX remove OSError when bug fixed
except (urllib2.URLError, OSError):
self.assertTrue(not ftp)
else:
self.assertTrue(o.req is req)
self.assertEqual(req.type, "ftp")
self.assertEqual(req.type == "ftp", ftp)
def test_http(self):
h = urllib2.AbstractHTTPHandler()
o = h.parent = MockOpener()
url = "http://example.com/"
for method, data in [("GET", None), ("POST", "blah")]:
req = Request(url, data, {"Foo": "bar"})
req.timeout = None
req.add_unredirected_header("Spam", "eggs")
http = MockHTTPClass()
r = h.do_open(http, req)
# result attributes
r.read; r.readline # wrapped MockFile methods
r.info; r.geturl # addinfourl methods
r.code, r.msg == 200, "OK" # added from MockHTTPClass.getreply()
hdrs = r.info()
hdrs.get; hdrs.has_key # r.info() gives dict from .getreply()
self.assertEqual(r.geturl(), url)
self.assertEqual(http.host, "example.com")
self.assertEqual(http.level, 0)
self.assertEqual(http.method, method)
self.assertEqual(http.selector, "/")
self.assertEqual(http.req_headers,
[("Connection", "close"),
("Foo", "bar"), ("Spam", "eggs")])
self.assertEqual(http.data, data)
# check socket.error converted to URLError
http.raise_on_endheaders = True
self.assertRaises(urllib2.URLError, h.do_open, http, req)
# check adding of standard headers
o.addheaders = [("Spam", "eggs")]
for data in "", None: # POST, GET
req = Request("http://example.com/", data)
r = MockResponse(200, "OK", {}, "")
newreq = h.do_request_(req)
if data is None: # GET
self.assertNotIn("Content-length", req.unredirected_hdrs)
self.assertNotIn("Content-type", req.unredirected_hdrs)
else: # POST
self.assertEqual(req.unredirected_hdrs["Content-length"], "0")
self.assertEqual(req.unredirected_hdrs["Content-type"],
"application/x-www-form-urlencoded")
# XXX the details of Host could be better tested
self.assertEqual(req.unredirected_hdrs["Host"], "example.com")
self.assertEqual(req.unredirected_hdrs["Spam"], "eggs")
# don't clobber existing headers
req.add_unredirected_header("Content-length", "foo")
req.add_unredirected_header("Content-type", "bar")
req.add_unredirected_header("Host", "baz")
req.add_unredirected_header("Spam", "foo")
newreq = h.do_request_(req)
self.assertEqual(req.unredirected_hdrs["Content-length"], "foo")
self.assertEqual(req.unredirected_hdrs["Content-type"], "bar")
self.assertEqual(req.unredirected_hdrs["Host"], "baz")
self.assertEqual(req.unredirected_hdrs["Spam"], "foo")
def test_http_doubleslash(self):
# Checks that the presence of an unnecessary double slash in a url doesn't break anything
# Previously, a double slash directly after the host could cause incorrect parsing of the url
h = urllib2.AbstractHTTPHandler()
o = h.parent = MockOpener()
data = ""
ds_urls = [
"http://example.com/foo/bar/baz.html",
"http://example.com//foo/bar/baz.html",
"http://example.com/foo//bar/baz.html",
"http://example.com/foo/bar//baz.html",
]
for ds_url in ds_urls:
ds_req = Request(ds_url, data)
# Check whether host is determined correctly if there is no proxy
np_ds_req = h.do_request_(ds_req)
self.assertEqual(np_ds_req.unredirected_hdrs["Host"],"example.com")
# Check whether host is determined correctly if there is a proxy
ds_req.set_proxy("someproxy:3128",None)
p_ds_req = h.do_request_(ds_req)
self.assertEqual(p_ds_req.unredirected_hdrs["Host"],"example.com")
def test_fixpath_in_weirdurls(self):
        # Issue 4493: urllib2 should supply a '/' selector for URLs whose path
        # does not start with '/'
h = urllib2.AbstractHTTPHandler()
o = h.parent = MockOpener()
weird_url = 'http://www.python.org?getspam'
req = Request(weird_url)
newreq = h.do_request_(req)
self.assertEqual(newreq.get_host(),'www.python.org')
self.assertEqual(newreq.get_selector(),'/?getspam')
url_without_path = 'http://www.python.org'
req = Request(url_without_path)
newreq = h.do_request_(req)
self.assertEqual(newreq.get_host(),'www.python.org')
self.assertEqual(newreq.get_selector(),'')
def test_errors(self):
h = urllib2.HTTPErrorProcessor()
o = h.parent = MockOpener()
url = "http://example.com/"
req = Request(url)
# all 2xx are passed through
r = MockResponse(200, "OK", {}, "", url)
newr = h.http_response(req, r)
self.assertTrue(r is newr)
self.assertTrue(not hasattr(o, "proto")) # o.error not called
r = MockResponse(202, "Accepted", {}, "", url)
newr = h.http_response(req, r)
self.assertTrue(r is newr)
self.assertTrue(not hasattr(o, "proto")) # o.error not called
r = MockResponse(206, "Partial content", {}, "", url)
newr = h.http_response(req, r)
self.assertTrue(r is newr)
self.assertTrue(not hasattr(o, "proto")) # o.error not called
# anything else calls o.error (and MockOpener returns None, here)
r = MockResponse(502, "Bad gateway", {}, "", url)
self.assertTrue(h.http_response(req, r) is None)
self.assertEqual(o.proto, "http") # o.error called
self.assertEqual(o.args, (req, r, 502, "Bad gateway", {}))
def test_cookies(self):
cj = MockCookieJar()
h = urllib2.HTTPCookieProcessor(cj)
o = h.parent = MockOpener()
req = Request("http://example.com/")
r = MockResponse(200, "OK", {}, "")
newreq = h.http_request(req)
self.assertTrue(cj.ach_req is req is newreq)
self.assertEqual(req.get_origin_req_host(), "example.com")
self.assertTrue(not req.is_unverifiable())
newr = h.http_response(req, r)
self.assertTrue(cj.ec_req is req)
self.assertTrue(cj.ec_r is r is newr)
def test_redirect(self):
from_url = "http://example.com/a.html"
to_url = "http://example.com/b.html"
h = urllib2.HTTPRedirectHandler()
o = h.parent = MockOpener()
# ordinary redirect behaviour
for code in 301, 302, 303, 307:
for data in None, "blah\nblah\n":
method = getattr(h, "http_error_%s" % code)
req = Request(from_url, data)
req.add_header("Nonsense", "viking=withhold")
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
if data is not None:
req.add_header("Content-Length", str(len(data)))
req.add_unredirected_header("Spam", "spam")
try:
method(req, MockFile(), code, "Blah",
MockHeaders({"location": to_url}))
except urllib2.HTTPError:
# 307 in response to POST requires user OK
self.assertTrue(code == 307 and data is not None)
self.assertEqual(o.req.get_full_url(), to_url)
try:
self.assertEqual(o.req.get_method(), "GET")
except AttributeError:
self.assertTrue(not o.req.has_data())
# now it's a GET, there should not be headers regarding content
# (possibly dragged from before being a POST)
headers = [x.lower() for x in o.req.headers]
self.assertNotIn("content-length", headers)
self.assertNotIn("content-type", headers)
self.assertEqual(o.req.headers["Nonsense"],
"viking=withhold")
self.assertNotIn("Spam", o.req.headers)
self.assertNotIn("Spam", o.req.unredirected_hdrs)
# loop detection
req = Request(from_url)
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
def redirect(h, req, url=to_url):
h.http_error_302(req, MockFile(), 302, "Blah",
MockHeaders({"location": url}))
# Note that the *original* request shares the same record of
# redirections with the sub-requests caused by the redirections.
# detect infinite loop redirect of a URL to itself
req = Request(from_url, origin_req_host="example.com")
count = 0
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
try:
while 1:
redirect(h, req, "http://example.com/")
count = count + 1
except urllib2.HTTPError:
# don't stop until max_repeats, because cookies may introduce state
self.assertEqual(count, urllib2.HTTPRedirectHandler.max_repeats)
# detect endless non-repeating chain of redirects
req = Request(from_url, origin_req_host="example.com")
count = 0
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
try:
while 1:
redirect(h, req, "http://example.com/%d" % count)
count = count + 1
except urllib2.HTTPError:
self.assertEqual(count,
urllib2.HTTPRedirectHandler.max_redirections)
def test_invalid_redirect(self):
from_url = "http://example.com/a.html"
valid_schemes = ['http', 'https', 'ftp']
invalid_schemes = ['file', 'imap', 'ldap']
schemeless_url = "example.com/b.html"
h = urllib2.HTTPRedirectHandler()
o = h.parent = MockOpener()
req = Request(from_url)
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
for scheme in invalid_schemes:
invalid_url = scheme + '://' + schemeless_url
self.assertRaises(urllib2.HTTPError, h.http_error_302,
req, MockFile(), 302, "Security Loophole",
MockHeaders({"location": invalid_url}))
for scheme in valid_schemes:
valid_url = scheme + '://' + schemeless_url
h.http_error_302(req, MockFile(), 302, "That's fine",
MockHeaders({"location": valid_url}))
self.assertEqual(o.req.get_full_url(), valid_url)
def test_cookie_redirect(self):
# cookies shouldn't leak into redirected requests
from cookielib import CookieJar
from test.test_cookielib import interact_netscape
cj = CookieJar()
interact_netscape(cj, "http://www.example.com/", "spam=eggs")
hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n")
hdeh = urllib2.HTTPDefaultErrorHandler()
hrh = urllib2.HTTPRedirectHandler()
cp = urllib2.HTTPCookieProcessor(cj)
o = build_test_opener(hh, hdeh, hrh, cp)
o.open("http://www.example.com/")
self.assertTrue(not hh.req.has_header("Cookie"))
def test_redirect_fragment(self):
redirected_url = 'http://www.example.com/index.html#OK\r\n\r\n'
hh = MockHTTPHandler(302, 'Location: ' + redirected_url)
hdeh = urllib2.HTTPDefaultErrorHandler()
hrh = urllib2.HTTPRedirectHandler()
o = build_test_opener(hh, hdeh, hrh)
fp = o.open('http://www.example.com')
self.assertEqual(fp.geturl(), redirected_url.strip())
def test_proxy(self):
o = OpenerDirector()
ph = urllib2.ProxyHandler(dict(http="proxy.example.com:3128"))
o.add_handler(ph)
meth_spec = [
[("http_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://acme.example.com/")
self.assertEqual(req.get_host(), "acme.example.com")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual([(handlers[0], "http_open")],
[tup[0:2] for tup in o.calls])
def test_proxy_no_proxy(self):
os.environ['no_proxy'] = 'python.org'
o = OpenerDirector()
ph = urllib2.ProxyHandler(dict(http="proxy.example.com"))
o.add_handler(ph)
req = Request("http://www.perl.org/")
self.assertEqual(req.get_host(), "www.perl.org")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com")
req = Request("http://www.python.org")
self.assertEqual(req.get_host(), "www.python.org")
r = o.open(req)
self.assertEqual(req.get_host(), "www.python.org")
del os.environ['no_proxy']
def test_proxy_https(self):
o = OpenerDirector()
ph = urllib2.ProxyHandler(dict(https='proxy.example.com:3128'))
o.add_handler(ph)
meth_spec = [
[("https_open","return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("https://www.example.com/")
self.assertEqual(req.get_host(), "www.example.com")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual([(handlers[0], "https_open")],
[tup[0:2] for tup in o.calls])
def test_proxy_https_proxy_authorization(self):
o = OpenerDirector()
ph = urllib2.ProxyHandler(dict(https='proxy.example.com:3128'))
o.add_handler(ph)
https_handler = MockHTTPSHandler()
o.add_handler(https_handler)
req = Request("https://www.example.com/")
req.add_header("Proxy-Authorization","FooBar")
req.add_header("User-Agent","Grail")
self.assertEqual(req.get_host(), "www.example.com")
self.assertIsNone(req._tunnel_host)
r = o.open(req)
# Verify Proxy-Authorization gets tunneled to request.
# httpsconn req_headers do not have the Proxy-Authorization header but
# the req will have.
self.assertNotIn(("Proxy-Authorization","FooBar"),
https_handler.httpconn.req_headers)
self.assertIn(("User-Agent","Grail"),
https_handler.httpconn.req_headers)
self.assertIsNotNone(req._tunnel_host)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual(req.get_header("Proxy-authorization"),"FooBar")
def test_basic_auth(self, quote_char='"'):
opener = OpenerDirector()
password_manager = MockPasswordManager()
auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
realm = "ACME Widget Store"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm=%s%s%s\r\n\r\n' %
(quote_char, realm, quote_char) )
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected"
)
def test_basic_auth_with_single_quoted_realm(self):
self.test_basic_auth(quote_char="'")
def test_basic_auth_with_unquoted_realm(self):
opener = OpenerDirector()
password_manager = MockPasswordManager()
auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
realm = "ACME Widget Store"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm=%s\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
msg = "Basic Auth Realm was unquoted"
with test_support.check_warnings((msg, UserWarning)):
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected"
)
def test_proxy_basic_auth(self):
opener = OpenerDirector()
ph = urllib2.ProxyHandler(dict(http="proxy.example.com:3128"))
opener.add_handler(ph)
password_manager = MockPasswordManager()
auth_handler = urllib2.ProxyBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Proxy-authorization",
realm, http_handler, password_manager,
"http://acme.example.com:3128/protected",
"proxy.example.com:3128",
)
def test_basic_and_digest_auth_handlers(self):
# HTTPDigestAuthHandler raised an exception if it couldn't handle a 40*
# response (http://python.org/sf/1479302), where it should instead
# return None to allow another handler (especially
# HTTPBasicAuthHandler) to handle the response.
# Also (http://python.org/sf/14797027, RFC 2617 section 1.2), we must
# try digest first (since it's the strongest auth scheme), so we record
# order of calls here to check digest comes first:
class RecordingOpenerDirector(OpenerDirector):
def __init__(self):
OpenerDirector.__init__(self)
self.recorded = []
def record(self, info):
self.recorded.append(info)
class TestDigestAuthHandler(urllib2.HTTPDigestAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("digest")
urllib2.HTTPDigestAuthHandler.http_error_401(self,
*args, **kwds)
class TestBasicAuthHandler(urllib2.HTTPBasicAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("basic")
urllib2.HTTPBasicAuthHandler.http_error_401(self,
*args, **kwds)
opener = RecordingOpenerDirector()
password_manager = MockPasswordManager()
digest_handler = TestDigestAuthHandler(password_manager)
basic_handler = TestBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(basic_handler)
opener.add_handler(digest_handler)
opener.add_handler(http_handler)
# check basic auth isn't blocked by digest handler failing
self._test_basic_auth(opener, basic_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
# check digest was tried before basic (twice, because
# _test_basic_auth called .open() twice)
self.assertEqual(opener.recorded, ["digest", "basic"]*2)
def _test_basic_auth(self, opener, auth_handler, auth_header,
realm, http_handler, password_manager,
request_url, protected_url):
import base64
user, password = "wile", "coyote"
# .add_password() fed through to password manager
auth_handler.add_password(realm, request_url, user, password)
self.assertEqual(realm, password_manager.realm)
self.assertEqual(request_url, password_manager.url)
self.assertEqual(user, password_manager.user)
self.assertEqual(password, password_manager.password)
r = opener.open(request_url)
# should have asked the password manager for the username/password
self.assertEqual(password_manager.target_realm, realm)
self.assertEqual(password_manager.target_url, protected_url)
# expect one request without authorization, then one with
self.assertEqual(len(http_handler.requests), 2)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
userpass = '%s:%s' % (user, password)
auth_hdr_value = 'Basic '+base64.encodestring(userpass).strip()
self.assertEqual(http_handler.requests[1].get_header(auth_header),
auth_hdr_value)
self.assertEqual(http_handler.requests[1].unredirected_hdrs[auth_header],
auth_hdr_value)
# if the password manager can't find a password, the handler won't
# handle the HTTP auth error
password_manager.user = password_manager.password = None
http_handler.reset()
r = opener.open(request_url)
self.assertEqual(len(http_handler.requests), 1)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
class MiscTests(unittest.TestCase):
def test_build_opener(self):
class MyHTTPHandler(urllib2.HTTPHandler): pass
class FooHandler(urllib2.BaseHandler):
def foo_open(self): pass
class BarHandler(urllib2.BaseHandler):
def bar_open(self): pass
build_opener = urllib2.build_opener
o = build_opener(FooHandler, BarHandler)
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# can take a mix of classes and instances
o = build_opener(FooHandler, BarHandler())
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# subclasses of default handlers override default handlers
o = build_opener(MyHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
# a particular case of overriding: default handlers can be passed
# in explicitly
o = build_opener()
self.opener_has_handler(o, urllib2.HTTPHandler)
o = build_opener(urllib2.HTTPHandler)
self.opener_has_handler(o, urllib2.HTTPHandler)
o = build_opener(urllib2.HTTPHandler())
self.opener_has_handler(o, urllib2.HTTPHandler)
# Issue2670: multiple handlers sharing the same base class
class MyOtherHTTPHandler(urllib2.HTTPHandler): pass
o = build_opener(MyHTTPHandler, MyOtherHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
self.opener_has_handler(o, MyOtherHTTPHandler)
def opener_has_handler(self, opener, handler_class):
for h in opener.handlers:
if h.__class__ == handler_class:
break
else:
            self.fail('%s not found among opener handlers' % handler_class)
class RequestTests(unittest.TestCase):
def setUp(self):
self.get = urllib2.Request("http://www.python.org/~jeremy/")
self.post = urllib2.Request("http://www.python.org/~jeremy/",
"data",
headers={"X-Test": "test"})
def test_method(self):
self.assertEqual("POST", self.post.get_method())
self.assertEqual("GET", self.get.get_method())
def test_add_data(self):
self.assertTrue(not self.get.has_data())
self.assertEqual("GET", self.get.get_method())
self.get.add_data("spam")
self.assertTrue(self.get.has_data())
self.assertEqual("POST", self.get.get_method())
def test_get_full_url(self):
self.assertEqual("http://www.python.org/~jeremy/",
self.get.get_full_url())
def test_selector(self):
self.assertEqual("/~jeremy/", self.get.get_selector())
req = urllib2.Request("http://www.python.org/")
self.assertEqual("/", req.get_selector())
def test_get_type(self):
self.assertEqual("http", self.get.get_type())
def test_get_host(self):
self.assertEqual("www.python.org", self.get.get_host())
def test_get_host_unquote(self):
req = urllib2.Request("http://www.%70ython.org/")
self.assertEqual("www.python.org", req.get_host())
def test_proxy(self):
self.assertTrue(not self.get.has_proxy())
self.get.set_proxy("www.perl.org", "http")
self.assertTrue(self.get.has_proxy())
self.assertEqual("www.python.org", self.get.get_origin_req_host())
self.assertEqual("www.perl.org", self.get.get_host())
def test_wrapped_url(self):
req = Request("<URL:http://www.python.org>")
self.assertEqual("www.python.org", req.get_host())
def test_url_fragment(self):
req = Request("http://www.python.org/?qs=query#fragment=true")
self.assertEqual("/?qs=query", req.get_selector())
req = Request("http://www.python.org/#fun=true")
self.assertEqual("/", req.get_selector())
# Issue 11703: geturl() omits fragment in the original URL.
url = 'http://docs.python.org/library/urllib2.html#OK'
req = Request(url)
self.assertEqual(req.get_full_url(), url)
def test_HTTPError_interface(self):
"""
Issue 13211 reveals that HTTPError didn't implement the URLError
interface even though HTTPError is a subclass of URLError.
>>> err = urllib2.HTTPError(msg='something bad happened', url=None, code=None, hdrs=None, fp=None)
>>> assert hasattr(err, 'reason')
>>> err.reason
'something bad happened'
"""
def test_HTTPError_interface_call(self):
"""
        Issue 15701 - HTTPError interface has info method available from URLError.
"""
err = urllib2.HTTPError(msg='something bad happened', url=None,
code=None, hdrs='Content-Length:42', fp=None)
        self.assertTrue(hasattr(err, 'reason'))
        self.assertTrue(hasattr(err, 'info'))
        self.assertTrue(callable(err.info))
try:
err.info()
except AttributeError:
self.fail("err.info() failed")
self.assertEqual(err.info(), "Content-Length:42")
def test_main(verbose=None):
from test import test_urllib2
test_support.run_doctest(test_urllib2, verbose)
test_support.run_doctest(urllib2, verbose)
tests = (TrivialTests,
OpenerDirectorTests,
HandlerTests,
MiscTests,
RequestTests)
test_support.run_unittest(*tests)
if __name__ == "__main__":
test_main(verbose=True)
| gpl-3.0 |
rdbwebster/ansible-modules-extras | windows/win_chocolatey.py | 78 | 3019 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Trond Hindenes <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_chocolatey
version_added: "1.9"
short_description: Installs packages using chocolatey
description:
- Installs packages using Chocolatey (http://chocolatey.org/). If Chocolatey is missing from the system, the module will install it. List of packages can be found at http://chocolatey.org/packages
options:
name:
description:
- Name of the package to be installed
required: true
default: null
aliases: []
state:
description:
- State of the package on the system
required: false
choices:
- present
- absent
default: present
aliases: []
force:
description:
- Forces install of the package (even if it already exists). Using Force will cause ansible to always report that a change was made
required: false
choices:
- yes
- no
default: no
aliases: []
upgrade:
description:
      - If the package is already installed, try to upgrade it to the latest version or to the specified version
required: false
choices:
- yes
- no
default: no
aliases: []
version:
description:
- Specific version of the package to be installed
- Ignored when state == 'absent'
required: false
default: null
aliases: []
source:
description:
- Specify source rather than using default chocolatey repository
    required: false
default: null
aliases: []
author: "Trond Hindenes (@trondhindenes), Peter Mounce (@petemounce), Pepe Barbe (@elventear), Adam Keech (@smadam813)"
'''
# TODO:
# * Better parsing when a package has dependencies - currently fails
# * Time each item that is run
# * Support 'changed' with gems - would require shelling out to `gem list` first and parsing, kinda defeating the point of using chocolatey.
EXAMPLES = '''
# Install git
win_chocolatey:
name: git
# Install notepadplusplus version 6.6
win_chocolatey:
name: notepadplusplus.install
version: 6.6
# Uninstall git
win_chocolatey:
name: git
state: absent
# Install git from specified repository
win_chocolatey:
name: git
source: https://someserver/api/v2/
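# Illustrative extra example (not in the original module docs): upgrade an
# already-installed package using the 'upgrade' option documented above
win_chocolatey:
  name: git
  upgrade: yes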
'''
| gpl-3.0 |
avoinsystems/odoo | addons/hw_posbox_upgrade/__openerp__.py | 313 | 1696 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'PosBox Software Upgrader',
'version': '1.0',
'category': 'Hardware Drivers',
'website': 'https://www.odoo.com/page/point-of-sale',
'sequence': 6,
'summary': 'Allows to remotely upgrade the PosBox software',
'description': """
PosBox Software Upgrader
========================
This module allows the PosBox software to be remotely upgraded to a new
version. It is specific to the PosBox setup and environment and should
not be installed on regular OpenERP servers.
""",
'author': 'OpenERP SA',
'depends': ['hw_proxy'],
'test': [
],
'installable': False,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JJediny/python-social-auth | social/backends/odnoklassniki.py | 61 | 7050 | """
Odnoklassniki OAuth2 and Iframe Application backends, docs at:
http://psa.matiasaguirre.net/docs/backends/odnoklassnikiru.html
"""
from hashlib import md5
from social.p3 import unquote
from social.backends.base import BaseAuth
from social.backends.oauth import BaseOAuth2
from social.exceptions import AuthFailed
class OdnoklassnikiOAuth2(BaseOAuth2):
"""Odnoklassniki authentication backend"""
name = 'odnoklassniki-oauth2'
ID_KEY = 'uid'
ACCESS_TOKEN_METHOD = 'POST'
AUTHORIZATION_URL = 'http://www.odnoklassniki.ru/oauth/authorize'
ACCESS_TOKEN_URL = 'http://api.odnoklassniki.ru/oauth/token.do'
EXTRA_DATA = [('refresh_token', 'refresh_token'),
('expires_in', 'expires')]
def get_user_details(self, response):
"""Return user details from Odnoklassniki request"""
fullname, first_name, last_name = self.get_user_names(
fullname=unquote(response['name']),
first_name=unquote(response['first_name']),
last_name=unquote(response['last_name'])
)
return {
'username': response['uid'],
'email': '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name
}
def user_data(self, access_token, *args, **kwargs):
"""Return user data from Odnoklassniki REST API"""
data = {'access_token': access_token, 'method': 'users.getCurrentUser'}
key, secret = self.get_key_and_secret()
public_key = self.setting('PUBLIC_NAME')
return odnoklassniki_api(self, data, 'http://api.odnoklassniki.ru/',
public_key, secret, 'oauth')
class OdnoklassnikiApp(BaseAuth):
"""Odnoklassniki iframe app authentication backend"""
name = 'odnoklassniki-app'
ID_KEY = 'uid'
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
return dict([(key, value) for key, value in response.items()
if key in response['extra_data_list']])
def get_user_details(self, response):
fullname, first_name, last_name = self.get_user_names(
fullname=unquote(response['name']),
first_name=unquote(response['first_name']),
last_name=unquote(response['last_name'])
)
return {
'username': response['uid'],
'email': '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name
}
def auth_complete(self, *args, **kwargs):
self.verify_auth_sig()
response = self.get_response()
fields = ('uid', 'first_name', 'last_name', 'name') + \
self.setting('EXTRA_USER_DATA_LIST', ())
data = {
'method': 'users.getInfo',
'uids': '{0}'.format(response['logged_user_id']),
'fields': ','.join(fields),
}
client_key, client_secret = self.get_key_and_secret()
public_key = self.setting('PUBLIC_NAME')
details = odnoklassniki_api(self, data, response['api_server'],
public_key, client_secret,
'iframe_nosession')
if len(details) == 1 and 'uid' in details[0]:
details = details[0]
auth_data_fields = self.setting('EXTRA_AUTH_DATA_LIST',
('api_server', 'apiconnection',
'session_key', 'authorized',
'session_secret_key'))
for field in auth_data_fields:
details[field] = response[field]
details['extra_data_list'] = fields + auth_data_fields
kwargs.update({'backend': self, 'response': details})
else:
raise AuthFailed(self, 'Cannot get user details: API error')
return self.strategy.authenticate(*args, **kwargs)
def get_auth_sig(self):
secret_key = self.setting('SECRET')
hash_source = '{0:s}{1:s}{2:s}'.format(self.data['logged_user_id'],
self.data['session_key'],
secret_key)
return md5(hash_source.encode('utf-8')).hexdigest()
def get_response(self):
fields = ('logged_user_id', 'api_server', 'application_key',
'session_key', 'session_secret_key', 'authorized',
'apiconnection')
return dict((name, self.data[name]) for name in fields
if name in self.data)
def verify_auth_sig(self):
correct_key = self.get_auth_sig()
key = self.data['auth_sig'].lower()
if correct_key != key:
raise AuthFailed(self, 'Wrong authorization key')
def odnoklassniki_oauth_sig(data, client_secret):
"""
    Calculates the signature of the request data; the access_token value must be included.
Algorithm is described at
http://dev.odnoklassniki.ru/wiki/pages/viewpage.action?pageId=12878032,
search for "little bit different way"
"""
suffix = md5(
'{0:s}{1:s}'.format(data['access_token'],
client_secret).encode('utf-8')
).hexdigest()
check_list = sorted(['{0:s}={1:s}'.format(key, value)
for key, value in data.items()
if key != 'access_token'])
return md5((''.join(check_list) + suffix).encode('utf-8')).hexdigest()
def odnoklassniki_iframe_sig(data, client_secret_or_session_secret):
"""
Calculates signature as described at:
http://dev.odnoklassniki.ru/wiki/display/ok/
Authentication+and+Authorization
If API method requires session context, request is signed with session
secret key. Otherwise it is signed with application secret key
"""
param_list = sorted(['{0:s}={1:s}'.format(key, value)
for key, value in data.items()])
return md5(
(''.join(param_list) + client_secret_or_session_secret).encode('utf-8')
).hexdigest()
def odnoklassniki_api(backend, data, api_url, public_key, client_secret,
request_type='oauth'):
"""Calls Odnoklassniki REST API method
http://dev.odnoklassniki.ru/wiki/display/ok/Odnoklassniki+Rest+API"""
data.update({
'application_key': public_key,
'format': 'JSON'
})
if request_type == 'oauth':
data['sig'] = odnoklassniki_oauth_sig(data, client_secret)
elif request_type == 'iframe_session':
data['sig'] = odnoklassniki_iframe_sig(data,
data['session_secret_key'])
elif request_type == 'iframe_nosession':
data['sig'] = odnoklassniki_iframe_sig(data, client_secret)
else:
msg = 'Unknown request type {0}. How should it be signed?'
raise AuthFailed(backend, msg.format(request_type))
return backend.get_json(api_url + 'fb.do', params=data)
| bsd-3-clause |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/debian/headphones/apps/headphones/lib/unidecode/x072.py | 252 | 4659 | data = (
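# ASCII transliterations for the Unicode block U+7200..U+72FF; each entry is
# indexed by the code point's low byte (see the trailing "# 0xNN" markers).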
'He ', # 0x00
'Lan ', # 0x01
'Biao ', # 0x02
'Rong ', # 0x03
'Li ', # 0x04
'Mo ', # 0x05
'Bao ', # 0x06
'Ruo ', # 0x07
'Lu ', # 0x08
'La ', # 0x09
'Ao ', # 0x0a
'Xun ', # 0x0b
'Kuang ', # 0x0c
'Shuo ', # 0x0d
'[?] ', # 0x0e
'Li ', # 0x0f
'Lu ', # 0x10
'Jue ', # 0x11
'Liao ', # 0x12
'Yan ', # 0x13
'Xi ', # 0x14
'Xie ', # 0x15
'Long ', # 0x16
'Ye ', # 0x17
'[?] ', # 0x18
'Rang ', # 0x19
'Yue ', # 0x1a
'Lan ', # 0x1b
'Cong ', # 0x1c
'Jue ', # 0x1d
'Tong ', # 0x1e
'Guan ', # 0x1f
'[?] ', # 0x20
'Che ', # 0x21
'Mi ', # 0x22
'Tang ', # 0x23
'Lan ', # 0x24
'Zhu ', # 0x25
'[?] ', # 0x26
'Ling ', # 0x27
'Cuan ', # 0x28
'Yu ', # 0x29
'Zhua ', # 0x2a
'Tsumekanmuri ', # 0x2b
'Pa ', # 0x2c
'Zheng ', # 0x2d
'Pao ', # 0x2e
'Cheng ', # 0x2f
'Yuan ', # 0x30
'Ai ', # 0x31
'Wei ', # 0x32
'[?] ', # 0x33
'Jue ', # 0x34
'Jue ', # 0x35
'Fu ', # 0x36
'Ye ', # 0x37
'Ba ', # 0x38
'Die ', # 0x39
'Ye ', # 0x3a
'Yao ', # 0x3b
'Zu ', # 0x3c
'Shuang ', # 0x3d
'Er ', # 0x3e
'Qiang ', # 0x3f
'Chuang ', # 0x40
'Ge ', # 0x41
'Zang ', # 0x42
'Die ', # 0x43
'Qiang ', # 0x44
'Yong ', # 0x45
'Qiang ', # 0x46
'Pian ', # 0x47
'Ban ', # 0x48
'Pan ', # 0x49
'Shao ', # 0x4a
'Jian ', # 0x4b
'Pai ', # 0x4c
'Du ', # 0x4d
'Chuang ', # 0x4e
'Tou ', # 0x4f
'Zha ', # 0x50
'Bian ', # 0x51
'Die ', # 0x52
'Bang ', # 0x53
'Bo ', # 0x54
'Chuang ', # 0x55
'You ', # 0x56
'[?] ', # 0x57
'Du ', # 0x58
'Ya ', # 0x59
'Cheng ', # 0x5a
'Niu ', # 0x5b
'Ushihen ', # 0x5c
'Pin ', # 0x5d
'Jiu ', # 0x5e
'Mou ', # 0x5f
'Tuo ', # 0x60
'Mu ', # 0x61
'Lao ', # 0x62
'Ren ', # 0x63
'Mang ', # 0x64
'Fang ', # 0x65
'Mao ', # 0x66
'Mu ', # 0x67
'Gang ', # 0x68
'Wu ', # 0x69
'Yan ', # 0x6a
'Ge ', # 0x6b
'Bei ', # 0x6c
'Si ', # 0x6d
'Jian ', # 0x6e
'Gu ', # 0x6f
'You ', # 0x70
'Ge ', # 0x71
'Sheng ', # 0x72
'Mu ', # 0x73
'Di ', # 0x74
'Qian ', # 0x75
'Quan ', # 0x76
'Quan ', # 0x77
'Zi ', # 0x78
'Te ', # 0x79
'Xi ', # 0x7a
'Mang ', # 0x7b
'Keng ', # 0x7c
'Qian ', # 0x7d
'Wu ', # 0x7e
'Gu ', # 0x7f
'Xi ', # 0x80
'Li ', # 0x81
'Li ', # 0x82
'Pou ', # 0x83
'Ji ', # 0x84
'Gang ', # 0x85
'Zhi ', # 0x86
'Ben ', # 0x87
'Quan ', # 0x88
'Run ', # 0x89
'Du ', # 0x8a
'Ju ', # 0x8b
'Jia ', # 0x8c
'Jian ', # 0x8d
'Feng ', # 0x8e
'Pian ', # 0x8f
'Ke ', # 0x90
'Ju ', # 0x91
'Kao ', # 0x92
'Chu ', # 0x93
'Xi ', # 0x94
'Bei ', # 0x95
'Luo ', # 0x96
'Jie ', # 0x97
'Ma ', # 0x98
'San ', # 0x99
'Wei ', # 0x9a
'Li ', # 0x9b
'Dun ', # 0x9c
'Tong ', # 0x9d
'[?] ', # 0x9e
'Jiang ', # 0x9f
'Ikenie ', # 0xa0
'Li ', # 0xa1
'Du ', # 0xa2
'Lie ', # 0xa3
'Pi ', # 0xa4
'Piao ', # 0xa5
'Bao ', # 0xa6
'Xi ', # 0xa7
'Chou ', # 0xa8
'Wei ', # 0xa9
'Kui ', # 0xaa
'Chou ', # 0xab
'Quan ', # 0xac
'Fan ', # 0xad
'Ba ', # 0xae
'Fan ', # 0xaf
'Qiu ', # 0xb0
'Ji ', # 0xb1
'Cai ', # 0xb2
'Chuo ', # 0xb3
'An ', # 0xb4
'Jie ', # 0xb5
'Zhuang ', # 0xb6
'Guang ', # 0xb7
'Ma ', # 0xb8
'You ', # 0xb9
'Kang ', # 0xba
'Bo ', # 0xbb
'Hou ', # 0xbc
'Ya ', # 0xbd
'Yin ', # 0xbe
'Huan ', # 0xbf
'Zhuang ', # 0xc0
'Yun ', # 0xc1
'Kuang ', # 0xc2
'Niu ', # 0xc3
'Di ', # 0xc4
'Qing ', # 0xc5
'Zhong ', # 0xc6
'Mu ', # 0xc7
'Bei ', # 0xc8
'Pi ', # 0xc9
'Ju ', # 0xca
'Ni ', # 0xcb
'Sheng ', # 0xcc
'Pao ', # 0xcd
'Xia ', # 0xce
'Tuo ', # 0xcf
'Hu ', # 0xd0
'Ling ', # 0xd1
'Fei ', # 0xd2
'Pi ', # 0xd3
'Ni ', # 0xd4
'Ao ', # 0xd5
'You ', # 0xd6
'Gou ', # 0xd7
'Yue ', # 0xd8
'Ju ', # 0xd9
'Dan ', # 0xda
'Po ', # 0xdb
'Gu ', # 0xdc
'Xian ', # 0xdd
'Ning ', # 0xde
'Huan ', # 0xdf
'Hen ', # 0xe0
'Jiao ', # 0xe1
'He ', # 0xe2
'Zhao ', # 0xe3
'Ji ', # 0xe4
'Xun ', # 0xe5
'Shan ', # 0xe6
'Ta ', # 0xe7
'Rong ', # 0xe8
'Shou ', # 0xe9
'Tong ', # 0xea
'Lao ', # 0xeb
'Du ', # 0xec
'Xia ', # 0xed
'Shi ', # 0xee
'Hua ', # 0xef
'Zheng ', # 0xf0
'Yu ', # 0xf1
'Sun ', # 0xf2
'Yu ', # 0xf3
'Bi ', # 0xf4
'Mang ', # 0xf5
'Xi ', # 0xf6
'Juan ', # 0xf7
'Li ', # 0xf8
'Xia ', # 0xf9
'Yin ', # 0xfa
'Suan ', # 0xfb
'Lang ', # 0xfc
'Bei ', # 0xfd
'Zhi ', # 0xfe
'Yan ', # 0xff
)
| gpl-2.0 |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/losses/python/losses/loss_ops_test.py | 82 | 55012 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.losses.python.losses.loss_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class AbsoluteDifferenceLossTest(test.TestCase):
def setUp(self):
self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.absolute_difference(
self._predictions, self._predictions, weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.absolute_difference(self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = loss_ops.absolute_difference(self._predictions, self._labels)
with self.test_session():
self.assertAlmostEqual(5.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.absolute_difference(self._predictions, self._labels,
constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 0.0], shape=[2,])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 0.0], shape=[2, 1])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(16.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(6.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weights = array_ops.zeros((2, 3))
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class SoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.softmax_cross_entropy(logits, labels, weights=None)
def testAllCorrect(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
loss = loss_ops.softmax_cross_entropy(logits, labels)
self.assertEquals('softmax_cross_entropy_loss/value', loss.op.name)
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrong(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = 2.3
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = 2.3
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels,
constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testAllWrongAllWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([0, 0, 0], shape=[3])
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testSomeWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([1.2, 0, 0], shape=[3])
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(12.0, loss.eval(), 3)
def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
weights = constant_op.constant([[3, 4, 5],
[2, 6, 0],
[8, 0, 1]])
with self.assertRaises(ValueError):
loss_ops.softmax_cross_entropy(logits, labels, weights=weights).eval()
def testSoftmaxLabelSmoothing(self):
with self.test_session():
# Softmax Cross Entropy Loss is:
# -\sum_i p_i \log q_i
# where for a softmax activation
# \log q_i = x_i - \log \sum_j \exp x_j
# = x_i - x_max - \log \sum_j \exp (x_j - x_max)
      # For our activations, [100, -100, -100], the log partition function becomes
# \log ( exp(0) + exp(-200) + exp(-200) ) = 0
# so our log softmaxes become: [0, -200, -200]
# so our cross entropy loss is:
# -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n
logits = constant_op.constant([[100.0, -100.0, -100.0]])
labels = constant_op.constant([[1, 0, 0]])
label_smoothing = 0.1
loss = loss_ops.softmax_cross_entropy(
logits, labels, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
def testLossWithDynamicallyShapedWeights1D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = [2.3, 2.4, 2.5]
weights_placeholder = array_ops.placeholder(dtypes.float32, shape=[None])
loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
with self.test_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
def testLossWithDynamicallyShapedWeights2D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = [[2.3], [2.4], [2.5]]
weights_placeholder = array_ops.placeholder(
dtypes.float32, shape=[None, None])
loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
with self.test_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]])
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(logits, labels, weights=None)
def testAllCorrectInt32Labels(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int32)
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectInt64Labels(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectNonColumnLabels(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([0, 1, 2])
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrongInt32Labels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int32)
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongInt64Labels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int64)
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongNonColumnLabels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([2, 0, 1])
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(
logits, labels, constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testNonZeroLossWithColumnWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([[1.2], [3.4], [5.6]])
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testAllWrongAllWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([0, 0, 0], shape=[3])
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testSomeWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([1.2, 0, 0], shape=[3])
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(12.0, loss.eval(), 3)
def testMeasurementSpecificWeightsRaisesException(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2]])
weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentWeightSizeRaisesException(self):
"""The weight tensor has incorrect number of elements."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2]])
weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentLabelSizeRaisesException(self):
"""The label tensor has incorrect number of elements."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2], [3]])
weights = constant_op.constant([1.2, 3.4, 5.6])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentWeightShapeRaisesException(self):
"""The weight tensor has incorrect shape."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2], [3]])
weights = constant_op.constant([[1.2, 3.4], [5.6, 7.8]])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentLabelShapeRaisesException(self):
"""The label tensor has incorrect shape."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 1], [2, 3]])
weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
with self.assertRaises(errors_impl.InvalidArgumentError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testLossWithDynamicallyShapedWeights1D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([2, 0, 1])
weights = [2.3, 2.4, 2.5]
weights_placeholder = array_ops.placeholder(
dtypes.float32, shape=[None])
loss = loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights_placeholder)
with self.test_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
def testLossWithDynamicallyShapedWeights2D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([2, 0, 1])
weights = [[2.3], [2.4], [2.5]]
weights_placeholder = array_ops.placeholder(
dtypes.float32, shape=[None, None])
loss = loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights_placeholder)
with self.test_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SigmoidCrossEntropyLossTest(test.TestCase):
def testAllCorrectSigmoid(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 1))
labels = array_ops.placeholder(dtypes.float32, shape=(None, 1))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: np.ones((32, 1)),
labels: np.ones((32, 1)),
})
self.assertAlmostEqual(0.313, loss, 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 2))
labels = array_ops.placeholder(dtypes.float32, shape=(None, 2))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: np.ones((32, 2)),
labels: np.ones((32, 2)),
})
self.assertAlmostEqual(0.313, loss, 3)
def testAllWrongSigmoid(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)
def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([[3, 4, 5],
[2, 6, 0],
[8, 0, 1]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(1700.0 / 7.0, loss.eval(), 3)
def testMultiCorrectSigmoid(self):
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0],
[-100.0, 100.0, 100.0]])
labels = constant_op.constant([[1, 0, 1],
[1, 1, 0],
[0, 1, 1]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
with self.test_session():
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testSigmoidLabelSmoothingCorrect(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0]])
labels = constant_op.constant([[1, 0, 1]])
# Sigmoid cross entropy loss is:
# max(x,0) - x*z + log(1 + exp(-abs(x)))
# The new labels are:
# z' = z * (1 - L) + 0.5 L
# 1 -> 1 - 0.5 L
# 0 -> 0.5 L
# here we expect:
# 1/3 * (100 - 100 * (1 - 0.5 L) + 0
# + 0 + 100 * (0.5 L) + 0
# + 0 + 100 * (1 - 0.5 L) + 0)
# = 1/3 * (100 + 50 L)
label_smoothing = 0.1
loss = loss_ops.sigmoid_cross_entropy(
logits, labels, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
with self.test_session():
label_smoothing = 0.1
sigmoid_logits = constant_op.constant([[100.0, -100.0, -100.0]])
sigmoid_labels = constant_op.constant([[1, 0, 1]])
sigmoid_loss = loss_ops.sigmoid_cross_entropy(
sigmoid_logits, sigmoid_labels, label_smoothing=label_smoothing)
softmax_logits = constant_op.constant(
[[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
softmax_labels = constant_op.constant([[0, 1], [1, 0], [0, 1]])
softmax_loss = loss_ops.softmax_cross_entropy(
softmax_logits, softmax_labels, label_smoothing=label_smoothing)
self.assertAlmostEqual(sigmoid_loss.eval(), softmax_loss.eval(), 3)
class LogLossTest(test.TestCase):
def setUp(self):
predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
labels = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
self._np_predictions = predictions
self._np_labels = labels
epsilon = 1e-7
self._expected_losses = np.multiply(
labels, np.log(predictions + epsilon)) + np.multiply(
1 - labels, np.log(1 - predictions + epsilon))
self._predictions = constant_op.constant(predictions)
self._labels = constant_op.constant(labels)
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.log_loss(self._labels, self._labels, weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.log_loss(self._labels, self._labels)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testAllCorrectNoLossWeightWithPlaceholder(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_labels.shape)
loss = loss_ops.log_loss(tf_predictions, self._labels)
with self.test_session():
self.assertAlmostEqual(
0.0, loss.eval(feed_dict={tf_predictions: self._np_labels}), 3)
def testNonZeroLoss(self):
loss = loss_ops.log_loss(self._predictions, self._labels)
with self.test_session():
self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.log_loss(self._predictions, self._labels,
constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_predictions.shape)
weights = 2.3
loss = loss_ops.log_loss(tf_predictions, self._labels,
constant_op.constant(weights))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[None, None])
weights = 2.3
loss = loss_ops.log_loss(tf_predictions, self._labels,
constant_op.constant(weights))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=[2])
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 6.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
weights = constant_op.constant([1.2, 0], shape=[2])
expected_losses = np.multiply(self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
(2, 3)))
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
weights = constant_op.constant([1.2, 0], shape=[2, 1])
expected_losses = np.multiply(self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
(2, 3)))
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)
def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
weights = constant_op.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.log_loss(self._predictions, self._labels, weights)
def testNonZeroLossWithMeasurementSpecificWeights(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
loss = loss_ops.log_loss(
self._predictions,
self._labels,
constant_op.constant(
weights, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss.eval(), 3)
def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
loss = loss_ops.log_loss(
tf_predictions,
self._labels,
constant_op.constant(
weights, shape=(2, 3)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss, 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
loss = loss_ops.log_loss(
self._predictions,
self._labels,
constant_op.constant(
weights, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses), loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
tf_weights = constant_op.constant(weights, shape=(2, 3))
loss = loss_ops.log_loss(tf_predictions, self._labels, tf_weights)
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses), loss, 3)
def testLossWithSampleSpecificWeightsAllZero(self):
tf_weights = array_ops.zeros(shape=(2, 3))
loss = loss_ops.log_loss(self._predictions, self._labels, tf_weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class HingeLossTest(test.TestCase):
def testIncompatibleShapes(self):
with self.test_session():
logits = constant_op.constant([[-1.0], [2.1]])
labels = constant_op.constant([0.0, 1.0])
with self.assertRaises(ValueError):
_ = loss_ops.hinge_loss(logits, labels).eval()
def testAllOutsideMargin(self):
with self.test_session():
logits = constant_op.constant([1.2, -1.4, -1.0, 2.1])
labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
loss = loss_ops.hinge_loss(logits, labels)
self.assertAllClose(loss.eval(), [0.0, 0.0, 0.0, 0.0], atol=1e-3)
def testSomeInsideMargin(self):
with self.test_session():
logits = constant_op.constant([[-0.7], [-1.4], [1.4], [0.6]])
labels = constant_op.constant([[0.0], [0.0], [1.0], [1.0]])
loss = loss_ops.hinge_loss(logits, labels)
# Examples 1 and 4 are on the correct side of the hyperplane but within
# the margin so they incur some (small) loss.
self.assertAllClose(loss.eval(), [[0.3], [0.0], [0.0], [0.4]], atol=1e-3)
def testSomeMisclassified(self):
with self.test_session():
logits = constant_op.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
labels = constant_op.constant([[[1.0], [0.0], [0.0], [1.0]]])
loss = loss_ops.hinge_loss(logits, labels)
# Examples 2 and 4 are on the wrong side of the hyperplane so they incur
# some (fairly large) loss.
self.assertAllClose(
loss.eval(), [[[0.0], [1.4], [0.0], [2.1]]], atol=1e-3)
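# Illustrative sketch, not part of the original test file: a NumPy reference for the
# elementwise hinge loss checked above, max(0, 1 - (2 * label - 1) * logit), which
# maps {0, 1} labels onto {-1, +1} before applying the unit margin.
def _numpy_hinge_loss(logits, labels):
  logits = np.asarray(logits, dtype=np.float64)
  labels = np.asarray(labels, dtype=np.float64)
  return np.maximum(0.0, 1.0 - (2.0 * labels - 1.0) * logits)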
class MeanSquaredErrorTest(test.TestCase):
def setUp(self):
self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.mean_squared_error(
self._predictions, self._predictions, weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.mean_squared_error(self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = loss_ops.mean_squared_error(self._predictions, self._labels)
with self.test_session():
self.assertAlmostEqual(49.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(49.5 * weights, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.mean_squared_error(self._predictions, self._labels,
constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(49.5 * weights, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=[2,])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=[2, 1])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(587 / 5.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(18.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weights = array_ops.zeros((2, 3))
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class MeanPairwiseSquaresErrorTest(test.TestCase):
def setUp(self):
self._predictions = np.array([[4, 8, 12], [8, 1, 3]])
self._labels = np.array([[1, 9, 2], [-5, -5, 7]])
batch_size, dims = self._labels.shape
# Compute the expected loss 'manually'.
total = np.zeros((batch_size, 1))
for b in range(batch_size):
for i in range(dims):
for j in range(dims):
x = self._predictions[b, i].item() - self._predictions[b, j].item()
y = self._labels[b, i].item() - self._labels[b, j].item()
tmp = (x - y) * (x - y)
total[b] += tmp
self._expected_losses = np.divide(total, 9.0)
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels))
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels))
with self.test_session():
self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)
def testGradientWithZeroWeight(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
inputs = array_ops.ones((2, 3))
weights = variable_scope.get_variable(
'weights',
shape=[3, 4],
initializer=init_ops.truncated_normal_initializer())
predictions = math_ops.matmul(inputs, weights)
optimizer = momentum_lib.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
loss = loss_ops.mean_pairwise_squared_error(predictions, predictions, 0)
gradients_to_variables = optimizer.compute_gradients(loss)
init_op = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
for grad, _ in gradients_to_variables:
np_grad = sess.run(grad)
self.assertFalse(np.isnan(np_grad).any())
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=weights)
with self.test_session():
self.assertAlmostEqual(weights * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(weights * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarZeroWeight(self):
weights = 0
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self):
weights = 2.3
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._predictions.shape)
tf_labels = array_ops.placeholder(dtypes.float32, shape=self._labels.shape)
loss = loss_ops.mean_pairwise_squared_error(
predictions=tf_predictions,
labels=tf_labels,
weights=constant_op.constant(weights))
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
tf_predictions: self._predictions,
tf_labels: self._labels,
})
self.assertAlmostEqual(weights * np.sum(self._expected_losses), loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = np.asarray([2.0, 1.0]).reshape((2, 1))
expected_losses = np.multiply(weights, self._expected_losses)
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(
weights, shape=[2]))
with self.test_session():
self.assertAlmostEqual(np.sum(expected_losses), loss.eval(), 3)
def testZeroLossWithOneDimBatchZeroWeights(self):
weights = np.asarray([0.0, 0.0]).reshape((2, 1))
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(
weights, shape=[2]))
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders(self):
weights = np.asarray([1.2, 3.4]).reshape((2, 1))
expected_losses = np.multiply(weights, self._expected_losses)
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._predictions.shape)
tf_labels = array_ops.placeholder(dtypes.int32, shape=self._labels.shape)
loss = loss_ops.mean_pairwise_squared_error(
predictions=tf_predictions,
labels=tf_labels,
weights=constant_op.constant(
weights, shape=[2]))
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
tf_predictions: self._predictions,
tf_labels: self._labels,
})
self.assertAlmostEqual(np.sum(expected_losses), loss, 3)
def testLossWithAllZeroBatchSpecificWeights(self):
weights = np.zeros((2, 1))
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(
weights, shape=[2]))
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testLossIsAssociativeAcrossBatchElements(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
height = 3
width = 4
shape = (1, height, width, 1)
labels0 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
predictions0 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
labels1 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
predictions1 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
loss0 = loss_ops.mean_pairwise_squared_error(
predictions=predictions0,
labels=labels0)
loss1 = loss_ops.mean_pairwise_squared_error(
predictions=predictions1,
labels=labels1)
loss0_1 = loss_ops.mean_pairwise_squared_error(
predictions=array_ops.concat([predictions0, predictions1], 0),
labels=array_ops.concat([labels0, labels1], 0))
with self.test_session() as session:
loss0, loss1, loss0_1 = session.run([loss0, loss1, loss0_1])
self.assertTrue(loss0 > 0)
self.assertTrue(loss1 > 0)
self.assertAlmostEqual(loss0 + loss1, loss0_1, 5)
class CosineDistanceLossTest(test.TestCase):
def setUp(self):
self._predictions = np.asarray([
[1, 0, 0], # Batch 1
[0, 0, -1],
[1, 0, 0], # Batch 2
[1, 0, 0],
[0, 0, -1], # Batch 3
[1, 0, 0]
]).reshape((3, 2, 3))
self._labels = np.asarray([[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0]]).reshape((3, 2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2,
weights=None)
def testAllCorrectNoWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2)
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 5)
def testPartiallyCorrectWithIntegerValues(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2)
with self.test_session():
self.assertAlmostEqual(1, loss.eval(), 5)
def testPartiallyCorrectFloatingPointValues(self):
predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
labels = np.matrix(('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
tf_preds = constant_op.constant(
predictions, shape=(3, 1, 3), dtype=dtypes.float32)
tf_labels = constant_op.constant(
labels, shape=(3, 1, 3), dtype=dtypes.float32)
loss = loss_ops.cosine_distance(tf_preds, tf_labels, dim=2)
with self.test_session():
self.assertAlmostEqual(1.0, loss.eval(), 5)
def testSampleSpecificWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant([1, 0, 0]))
with self.test_session():
self.assertEqual(1.0, loss.eval())
def testMeasurementSpecificWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session():
self.assertEqual(3.0 / 4.0, loss.eval())
def testValueErrorThrownWithShapelessPlaceholder(self):
tf_predictions = array_ops.placeholder(dtypes.float32)
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.cosine_distance(
predictions=tf_predictions,
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._labels.shape)
loss = loss_ops.cosine_distance(
predictions=tf_predictions,
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
self.assertEqual(3.0 / 4.0, loss)
def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=array_ops.zeros((3,)))
with self.test_session():
self.assertEqual(0, loss.eval())
def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=array_ops.zeros((3, 2)))
with self.test_session():
self.assertEqual(0, loss.eval())
class ComputeWeightedLossTest(test.TestCase):
def testHingeLoss(self):
logits = constant_op.constant([1.2, 0.4, -1.0, -1.1])
labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
losses = loss_ops.hinge_loss(logits, labels)
self.assertFalse(loss_ops.get_losses())
loss = loss_ops.compute_weighted_loss(losses)
self.assertTrue(loss_ops.get_losses())
with self.test_session():
self.assertAllClose(losses.eval(), [0.0, 1.4, 0.0, 2.1], atol=1e-3)
self.assertAllClose(loss.eval(), 3.5 / 4.0, atol=1e-3)
class AddLossTest(test.TestCase):
def testAddExternalLoss(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
losses = loss_ops.hinge_loss(logits, labels)
self.assertFalse(loss_ops.get_losses())
loss_ops.add_loss(math_ops.reduce_mean(losses))
self.assertTrue(loss_ops.get_losses())
total_loss = loss_ops.get_total_loss()
with self.test_session():
self.assertAllClose(losses.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)
self.assertAllClose(total_loss.eval(), 3.5 / 4.0, atol=1e-3)
def testNoneLossCollection(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
losses = loss_ops.hinge_loss(logits, labels)
self.assertFalse(loss_ops.get_losses())
loss_ops.add_loss(math_ops.reduce_mean(losses), loss_collection=None)
self.assertFalse(loss_ops.get_losses())
with self.test_session():
self.assertAllClose(losses.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)
def testNoCollectLosses(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
self.assertFalse(loss_ops.get_losses())
with arg_scope([loss_ops.add_loss], loss_collection=None):
loss_ops.absolute_difference(logits, labels)
loss_ops.log_loss(logits, labels)
loss_ops.mean_squared_error(logits, labels)
loss_ops.sigmoid_cross_entropy(logits, labels)
loss_ops.softmax_cross_entropy(logits, labels)
self.assertFalse(loss_ops.get_losses())
def testNoCollectLossesBatch2(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]] * 2)
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]] * 2)
self.assertFalse(loss_ops.get_losses())
with arg_scope([loss_ops.add_loss], loss_collection=None):
loss_ops.absolute_difference(logits, labels)
loss_ops.log_loss(logits, labels)
loss_ops.mean_squared_error(logits, labels)
loss_ops.sigmoid_cross_entropy(logits, labels)
loss_ops.softmax_cross_entropy(logits, labels)
self.assertFalse(loss_ops.get_losses())
if __name__ == '__main__':
test.main()
| mit |
DavidLP/home-assistant | homeassistant/components/mopar/__init__.py | 7 | 4374 | """Support for Mopar vehicles."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
from homeassistant.const import (
CONF_USERNAME,
CONF_PASSWORD,
CONF_PIN,
CONF_SCAN_INTERVAL
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import track_time_interval
DOMAIN = 'mopar'
DATA_UPDATED = '{}_data_updated'.format(DOMAIN)
_LOGGER = logging.getLogger(__name__)
COOKIE_FILE = 'mopar_cookies.pickle'
SUCCESS_RESPONSE = 'completed'
SUPPORTED_PLATFORMS = [LOCK, SENSOR, SWITCH]
DEFAULT_INTERVAL = timedelta(days=7)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_PIN): cv.positive_int,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL):
vol.All(cv.time_period, cv.positive_timedelta),
})
}, extra=vol.ALLOW_EXTRA)
SERVICE_HORN = 'sound_horn'
ATTR_VEHICLE_INDEX = 'vehicle_index'
SERVICE_HORN_SCHEMA = vol.Schema({
vol.Required(ATTR_VEHICLE_INDEX): cv.positive_int
})
def setup(hass, config):
"""Set up the Mopar component."""
import motorparts
conf = config[DOMAIN]
cookie = hass.config.path(COOKIE_FILE)
try:
session = motorparts.get_session(
conf[CONF_USERNAME],
conf[CONF_PASSWORD],
conf[CONF_PIN],
cookie_path=cookie
)
except motorparts.MoparError:
_LOGGER.error("Failed to login")
return False
data = hass.data[DOMAIN] = MoparData(hass, session)
data.update(now=None)
track_time_interval(
hass, data.update, conf[CONF_SCAN_INTERVAL]
)
def handle_horn(call):
"""Enable the horn on the Mopar vehicle."""
data.actuate('horn', call.data[ATTR_VEHICLE_INDEX])
hass.services.register(
DOMAIN,
SERVICE_HORN,
handle_horn,
schema=SERVICE_HORN_SCHEMA
)
for platform in SUPPORTED_PLATFORMS:
load_platform(hass, platform, DOMAIN, {}, config)
return True
class MoparData:
"""
Container for Mopar vehicle data.
    Holding the session in one place prevents a re-login race condition when it expires.
"""
def __init__(self, hass, session):
"""Initialize data."""
self._hass = hass
self._session = session
self.vehicles = []
self.vhrs = {}
self.tow_guides = {}
def update(self, now, **kwargs):
"""Update data."""
import motorparts
_LOGGER.debug("Updating vehicle data")
try:
self.vehicles = motorparts.get_summary(self._session)['vehicles']
except motorparts.MoparError:
_LOGGER.exception("Failed to get summary")
return
for index, _ in enumerate(self.vehicles):
try:
self.vhrs[index] = motorparts.get_report(self._session, index)
self.tow_guides[index] = motorparts.get_tow_guide(
self._session, index)
except motorparts.MoparError:
_LOGGER.warning("Failed to update for vehicle index %s", index)
return
dispatcher_send(self._hass, DATA_UPDATED)
@property
def attribution(self):
"""Get the attribution string from Mopar."""
import motorparts
return motorparts.ATTRIBUTION
def get_vehicle_name(self, index):
"""Get the name corresponding with this vehicle."""
vehicle = self.vehicles[index]
if not vehicle:
return None
return '{} {} {}'.format(
vehicle['year'],
vehicle['make'],
vehicle['model']
)
def actuate(self, command, index):
"""Run a command on the specified Mopar vehicle."""
import motorparts
try:
response = getattr(motorparts, command)(self._session, index)
except motorparts.MoparError as error:
_LOGGER.error(error)
return False
return response == SUCCESS_RESPONSE
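# Illustrative sketch, not part of the original component: how the horn service
# registered in setup() can be invoked from elsewhere in Home Assistant. The
# vehicle index 0 is an arbitrary example value.
def _demo_sound_horn(hass):
    """Sound the horn of the first Mopar vehicle via the registered service."""
    hass.services.call(DOMAIN, SERVICE_HORN, {ATTR_VEHICLE_INDEX: 0})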
| apache-2.0 |
clone1612/appstore | nextcloudappstore/core/tests/test_deletion_log.py | 2 | 1184 | from django.contrib.auth import get_user_model
from django.test import TestCase
from nextcloudappstore.core.models import App, AppRelease, AppReleaseDeleteLog
class DeletionLogTest(TestCase):
def test_delete_app(self):
user = get_user_model().objects.create(username='john')
app = App.objects.create(owner=user, id='news')
self.assertEqual(0, AppReleaseDeleteLog.objects.count())
app.delete()
self.assertEqual(1, AppReleaseDeleteLog.objects.count())
def test_delete_owner(self):
user = get_user_model().objects.create(username='john')
App.objects.create(owner=user, id='news')
self.assertEqual(0, AppReleaseDeleteLog.objects.count())
user.delete()
self.assertEqual(1, AppReleaseDeleteLog.objects.count())
def test_delete_app_release(self):
user = get_user_model().objects.create(username='john')
app = App.objects.create(owner=user, id='news')
release = AppRelease.objects.create(app=app, version='1.0.0')
self.assertEqual(0, AppReleaseDeleteLog.objects.count())
release.delete()
self.assertEqual(1, AppReleaseDeleteLog.objects.count())
| agpl-3.0 |
akhilari7/pa-dude | lib/python2.7/site-packages/django/contrib/gis/gdal/field.py | 355 | 6739 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
"""
This class wraps an OGR Field, and needs to be instantiated
from a Feature object.
"""
def __init__(self, feat, index):
"""
Initializes on the feature object and the integer index of
the field within the feature.
"""
# Setting the feature pointer and index.
self._feat = feat
self._index = index
# Getting the pointer for this field.
fld_ptr = capi.get_feat_field_defn(feat.ptr, index)
if not fld_ptr:
raise GDALException('Cannot create OGR Field, invalid pointer given.')
self.ptr = fld_ptr
# Setting the class depending upon the OGR Field Type (OFT)
self.__class__ = OGRFieldTypes[self.type]
# OFTReal with no precision should be an OFTInteger.
if isinstance(self, OFTReal) and self.precision == 0:
self.__class__ = OFTInteger
self._double = True
def __str__(self):
"Returns the string representation of the Field."
return str(self.value).strip()
# #### Field Methods ####
def as_double(self):
"Retrieves the Field's value as a double (float)."
return capi.get_field_as_double(self._feat.ptr, self._index)
def as_int(self, is_64=False):
"Retrieves the Field's value as an integer."
if is_64:
return capi.get_field_as_integer64(self._feat.ptr, self._index)
else:
return capi.get_field_as_integer(self._feat.ptr, self._index)
def as_string(self):
"Retrieves the Field's value as a string."
string = capi.get_field_as_string(self._feat.ptr, self._index)
return force_text(string, encoding=self._feat.encoding, strings_only=True)
def as_datetime(self):
"Retrieves the Field's value as a tuple of date & time components."
yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
status = capi.get_field_as_datetime(
self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd),
byref(hh), byref(mn), byref(ss), byref(tz))
if status:
return (yy, mm, dd, hh, mn, ss, tz)
else:
raise GDALException('Unable to retrieve date & time information from the field.')
# #### Field Properties ####
@property
def name(self):
"Returns the name of this Field."
name = capi.get_field_name(self.ptr)
return force_text(name, encoding=self._feat.encoding, strings_only=True)
@property
def precision(self):
"Returns the precision of this Field."
return capi.get_field_precision(self.ptr)
@property
def type(self):
"Returns the OGR type of this Field."
return capi.get_field_type(self.ptr)
@property
def type_name(self):
"Return the OGR field type name for this Field."
return capi.get_field_type_name(self.type)
@property
def value(self):
"Returns the value of this Field."
# Default is to get the field as a string.
return self.as_string()
@property
def width(self):
"Returns the width of this Field."
return capi.get_field_width(self.ptr)
# ### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
_double = False
_bit64 = False
@property
def value(self):
"Returns an integer contained in this field."
if self._double:
# If this is really from an OFTReal field with no precision,
# read as a double and cast as Python int (to prevent overflow).
return int(self.as_double())
else:
return self.as_int(self._bit64)
@property
def type(self):
"""
GDAL uses OFTReals to represent OFTIntegers in created
shapefiles -- forcing the type here since the underlying field
type may actually be OFTReal.
"""
return 0
class OFTReal(Field):
@property
def value(self):
"Returns a float contained in this field."
return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field):
pass
class OFTWideString(Field):
pass
class OFTBinary(Field):
pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
@property
def value(self):
"Returns a Python `date` object for the OFTDate field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return date(yy.value, mm.value, dd.value)
except (ValueError, GDALException):
return None
class OFTDateTime(Field):
@property
def value(self):
"Returns a Python `datetime` object for this OFTDateTime field."
# TODO: Adapt timezone information.
# See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html
# The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
# 100=GMT, 104=GMT+1, 80=GMT-5, etc.
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
except (ValueError, GDALException):
return None
class OFTTime(Field):
@property
def value(self):
"Returns a Python `time` object for this OFTTime field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return time(hh.value, mn.value, ss.value)
except (ValueError, GDALException):
return None
class OFTInteger64(OFTInteger):
_bit64 = True
# List fields are also just subclasses
class OFTIntegerList(Field):
pass
class OFTRealList(Field):
pass
class OFTStringList(Field):
pass
class OFTWideStringList(Field):
pass
class OFTInteger64List(Field):
pass
# Class mapping dictionary for OFT Types and reverse mapping.
OGRFieldTypes = {
0: OFTInteger,
1: OFTIntegerList,
2: OFTReal,
3: OFTRealList,
4: OFTString,
5: OFTStringList,
6: OFTWideString,
7: OFTWideStringList,
8: OFTBinary,
9: OFTDate,
10: OFTTime,
11: OFTDateTime,
# New 64-bit integer types in GDAL 2
12: OFTInteger64,
13: OFTInteger64List,
}
ROGRFieldTypes = {cls: num for num, cls in OGRFieldTypes.items()}
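# Illustrative sketch, not part of the original module: Field objects are not built
# directly but retrieved from a Feature while iterating a layer. The shapefile path
# and the 'name' field used here are placeholder assumptions.
def _demo_read_fields(shp_path):
    from django.contrib.gis.gdal import DataSource
    layer = DataSource(shp_path)[0]
    return [(feat['name'].type_name, feat['name'].value) for feat in layer]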
| mit |
GUBotDev/ardupilot | Tools/autotest/pysim/util.py | 56 | 12518 | import math
from math import sqrt, acos, cos, pi, sin, atan2
import os, sys, time, random
from rotmat import Vector3, Matrix3
from subprocess import call, check_call,Popen, PIPE
def m2ft(x):
'''meters to feet'''
return float(x) / 0.3048
def ft2m(x):
'''feet to meters'''
return float(x) * 0.3048
def kt2mps(x):
return x * 0.514444444
def mps2kt(x):
return x / 0.514444444
def topdir():
'''return top of git tree where autotest is running from'''
d = os.path.dirname(os.path.realpath(__file__))
assert(os.path.basename(d)=='pysim')
d = os.path.dirname(d)
assert(os.path.basename(d)=='autotest')
d = os.path.dirname(d)
assert(os.path.basename(d)=='Tools')
d = os.path.dirname(d)
return d
def reltopdir(path):
'''return a path relative to topdir()'''
return os.path.normpath(os.path.join(topdir(), path))
def run_cmd(cmd, dir=".", show=False, output=False, checkfail=True):
'''run a shell command'''
if show:
print("Running: '%s' in '%s'" % (cmd, dir))
if output:
return Popen([cmd], shell=True, stdout=PIPE, cwd=dir).communicate()[0]
elif checkfail:
return check_call(cmd, shell=True, cwd=dir)
else:
return call(cmd, shell=True, cwd=dir)
def rmfile(path):
'''remove a file if it exists'''
try:
os.unlink(path)
except Exception:
pass
def deltree(path):
'''delete a tree of files'''
run_cmd('rm -rf %s' % path)
def build_SIL(atype, target='sitl'):
'''build desktop SIL'''
run_cmd("make clean",
dir=reltopdir(atype),
checkfail=True)
run_cmd("make %s" % target,
dir=reltopdir(atype),
checkfail=True)
return True
def build_AVR(atype, board='mega2560'):
'''build AVR binaries'''
config = open(reltopdir('config.mk'), mode='w')
config.write('''
HAL_BOARD=HAL_BOARD_APM1
BOARD=%s
PORT=/dev/null
''' % board)
config.close()
run_cmd("make clean", dir=reltopdir(atype), checkfail=True)
run_cmd("make", dir=reltopdir(atype), checkfail=True)
return True
# list of pexpect children to close on exit
close_list = []
def pexpect_autoclose(p):
'''mark for autoclosing'''
global close_list
close_list.append(p)
def pexpect_close(p):
'''close a pexpect child'''
global close_list
try:
p.close()
except Exception:
pass
try:
p.close(force=True)
except Exception:
pass
if p in close_list:
close_list.remove(p)
def pexpect_close_all():
'''close all pexpect children'''
global close_list
for p in close_list[:]:
pexpect_close(p)
def pexpect_drain(p):
'''drain any pending input'''
import pexpect
try:
p.read_nonblocking(1000, timeout=0)
except pexpect.TIMEOUT:
pass
def start_SIL(atype, valgrind=False, wipe=False, height=None):
'''launch a SIL instance'''
import pexpect
cmd=""
if valgrind and os.path.exists('/usr/bin/valgrind'):
cmd += 'valgrind -q --log-file=%s-valgrind.log ' % atype
executable = reltopdir('tmp/%s.build/%s.elf' % (atype, atype))
if not os.path.exists(executable):
executable = '/tmp/%s.build/%s.elf' % (atype, atype)
cmd += executable
if wipe:
cmd += ' -w'
if height is not None:
cmd += ' -H %u' % height
ret = pexpect.spawn(cmd, logfile=sys.stdout, timeout=5)
ret.delaybeforesend = 0
pexpect_autoclose(ret)
ret.expect('Waiting for connection')
return ret
def start_MAVProxy_SIL(atype, aircraft=None, setup=False, master='tcp:127.0.0.1:5760',
options=None, logfile=sys.stdout):
'''launch mavproxy connected to a SIL instance'''
import pexpect
global close_list
MAVPROXY = os.getenv('MAVPROXY_CMD', 'mavproxy.py')
cmd = MAVPROXY + ' --master=%s --out=127.0.0.1:14550' % master
if setup:
cmd += ' --setup'
if aircraft is None:
aircraft = 'test.%s' % atype
cmd += ' --aircraft=%s' % aircraft
if options is not None:
cmd += ' ' + options
ret = pexpect.spawn(cmd, logfile=logfile, timeout=60)
ret.delaybeforesend = 0
pexpect_autoclose(ret)
return ret
def expect_setup_callback(e, callback):
'''setup a callback that is called once a second while waiting for
patterns'''
import pexpect
def _expect_callback(pattern, timeout=e.timeout):
tstart = time.time()
while time.time() < tstart + timeout:
try:
ret = e.expect_saved(pattern, timeout=1)
return ret
except pexpect.TIMEOUT:
e.expect_user_callback(e)
pass
print("Timed out looking for %s" % pattern)
raise pexpect.TIMEOUT(timeout)
e.expect_user_callback = callback
e.expect_saved = e.expect
e.expect = _expect_callback
def mkdir_p(dir):
'''like mkdir -p'''
if not dir:
return
if dir.endswith("/"):
mkdir_p(dir[:-1])
return
if os.path.isdir(dir):
return
mkdir_p(os.path.dirname(dir))
os.mkdir(dir)
def loadfile(fname):
'''load a file as a string'''
f = open(fname, mode='r')
r = f.read()
f.close()
return r
def lock_file(fname):
'''lock a file'''
import fcntl
f = open(fname, mode='w')
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except Exception:
return None
return f
def check_parent(parent_pid=None):
'''check our parent process is still alive'''
if parent_pid is None:
try:
parent_pid = os.getppid()
except Exception:
pass
if parent_pid is None:
return
try:
os.kill(parent_pid, 0)
except Exception:
print("Parent had finished - exiting")
sys.exit(1)
def EarthRatesToBodyRates(dcm, earth_rates):
'''convert the angular velocities from earth frame to
body frame. Thanks to James Goppert for the formula
    all inputs and outputs are in radians/s
returns a gyro vector in body frame, in rad/s
'''
from math import sin, cos
(phi, theta, psi) = dcm.to_euler()
phiDot = earth_rates.x
thetaDot = earth_rates.y
psiDot = earth_rates.z
p = phiDot - psiDot*sin(theta)
q = cos(phi)*thetaDot + sin(phi)*psiDot*cos(theta)
r = cos(phi)*psiDot*cos(theta) - sin(phi)*thetaDot
return Vector3(p, q, r)
def BodyRatesToEarthRates(dcm, gyro):
'''convert the angular velocities from body frame to
earth frame.
all inputs and outputs are in radians/s
    returns an earth rate vector
'''
from math import sin, cos, tan, fabs
p = gyro.x
q = gyro.y
r = gyro.z
(phi, theta, psi) = dcm.to_euler()
phiDot = p + tan(theta)*(q*sin(phi) + r*cos(phi))
thetaDot = q*cos(phi) - r*sin(phi)
if fabs(cos(theta)) < 1.0e-20:
theta += 1.0e-10
psiDot = (q*sin(phi) + r*cos(phi))/cos(theta)
return Vector3(phiDot, thetaDot, psiDot)
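# Illustrative sketch, not part of the original module: the two conversions above are
# inverses of one another for a given attitude. The Euler angles and rates below are
# arbitrary example values in radians and rad/s.
def _demo_earth_body_roundtrip():
    dcm = Matrix3()
    dcm.from_euler(0.1, -0.2, 1.5)           # roll, pitch, yaw
    earth_rates = Vector3(0.01, 0.02, -0.03)
    gyro = EarthRatesToBodyRates(dcm, earth_rates)
    return BodyRatesToEarthRates(dcm, gyro)  # approximately earth_rates again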
def gps_newpos(lat, lon, bearing, distance):
'''extrapolate latitude/longitude given a heading and distance
thanks to http://www.movable-type.co.uk/scripts/latlong.html
'''
from math import sin, asin, cos, atan2, radians, degrees
radius_of_earth = 6378100.0 # in meters
lat1 = radians(lat)
lon1 = radians(lon)
brng = radians(bearing)
dr = distance/radius_of_earth
lat2 = asin(sin(lat1)*cos(dr) +
cos(lat1)*sin(dr)*cos(brng))
lon2 = lon1 + atan2(sin(brng)*sin(dr)*cos(lat1),
cos(dr)-sin(lat1)*sin(lat2))
return (degrees(lat2), degrees(lon2))
class Wind(object):
'''a wind generation object'''
def __init__(self, windstring, cross_section=0.1):
a = windstring.split(',')
if len(a) != 3:
raise RuntimeError("Expected wind in speed,direction,turbulance form, not %s" % windstring)
self.speed = float(a[0]) # m/s
self.direction = float(a[1]) # direction the wind is going in
        self.turbulance = float(a[2]) # turbulence factor (standard deviation)
# the cross-section of the aircraft to wind. This is multiplied by the
# difference in the wind and the velocity of the aircraft to give the acceleration
self.cross_section = cross_section
        # the time constant for the turbulence - the average period of the
# changes over time
self.turbulance_time_constant = 5.0
# wind time record
self.tlast = time.time()
        # initial turbulence multiplier
self.turbulance_mul = 1.0
def current(self, deltat=None):
        '''return current wind speed and direction as a tuple.
        Speed is in m/s, direction in degrees.
'''
if deltat is None:
tnow = time.time()
deltat = tnow - self.tlast
self.tlast = tnow
        # update turbulence random walk
w_delta = math.sqrt(deltat)*(1.0-random.gauss(1.0, self.turbulance))
w_delta -= (self.turbulance_mul-1.0)*(deltat/self.turbulance_time_constant)
self.turbulance_mul += w_delta
speed = self.speed * math.fabs(self.turbulance_mul)
return (speed, self.direction)
# Calculate drag.
def drag(self, velocity, deltat=None, testing=None):
        '''return the current drag acceleration due to wind, in earth frame. The velocity
        parameter is a Vector3 of the current velocity of the aircraft in earth frame, m/s'''
from math import radians
# (m/s, degrees) : wind vector as a magnitude and angle.
(speed, direction) = self.current(deltat=deltat)
# speed = self.speed
# direction = self.direction
# Get the wind vector.
w = toVec(speed, radians(direction))
obj_speed = velocity.length()
# Compute the angle between the object vector and wind vector by taking
# the dot product and dividing by the magnitudes.
d = w.length() * obj_speed
if d == 0:
alpha = 0
else:
alpha = acos((w * velocity) / d)
# Get the relative wind speed and angle from the object. Note that the
# relative wind speed includes the velocity of the object; i.e., there
# is a headwind equivalent to the object's speed even if there is no
# absolute wind.
(rel_speed, beta) = apparent_wind(speed, obj_speed, alpha)
# Return the vector of the relative wind, relative to the coordinate
# system.
relWindVec = toVec(rel_speed, beta + atan2(velocity.y, velocity.x))
# Combine them to get the acceleration vector.
return Vector3( acc(relWindVec.x, drag_force(self, relWindVec.x))
, acc(relWindVec.y, drag_force(self, relWindVec.y))
, 0 )
# http://en.wikipedia.org/wiki/Apparent_wind
#
# Returns apparent wind speed and angle of apparent wind. Alpha is the angle
# between the object and the true wind. alpha of 0 rads is a headwind; pi a
# tailwind. Speeds should always be positive.
def apparent_wind(wind_sp, obj_speed, alpha):
delta = wind_sp * cos(alpha)
x = wind_sp**2 + obj_speed**2 + 2 * obj_speed * delta
rel_speed = sqrt(x)
if rel_speed == 0:
beta = pi
else:
beta = acos((delta + obj_speed) / rel_speed)
return (rel_speed, beta)
# See http://en.wikipedia.org/wiki/Drag_equation
#
# Drag equation is F(a) = cl * p/2 * v^2 * a, where cl : drag coefficient
# (let's assume it's low, e.g., 0.2), p : density of air (assume about 1
# kg/m^3, the density just over 1500m elevation), v : relative speed of wind
# (to the body), a : area acted on (this is captured by the cross_section
# parameter).
#
# So then we have
# F(a) = 0.2 * 1/2 * v^2 * cross_section = 0.1 * v^2 * cross_section
def drag_force(wind, sp):
return (sp**2.0) * 0.1 * wind.cross_section
# Function to make the force vector. relWindVec is the direction the apparent
# wind comes *from*. We want to compute the acceleration vector in the direction
# the wind blows toward.
def acc(val, mag):
if val == 0:
return mag
else:
return (val / abs(val)) * (0 - mag)
# Converts a magnitude and angle (radians) to a vector in the xy plane.
def toVec(magnitude, angle):
v = Vector3(magnitude, 0, 0)
m = Matrix3()
m.from_euler(0, 0, angle)
return m.transposed() * v
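# Illustrative sketch, not part of the original module: one way to drive the Wind
# model above. The wind string follows the "speed,direction,turbulence" format parsed
# by Wind.__init__; the numbers and the 50Hz timestep are arbitrary example values.
def _demo_wind_drag():
    '''compute the drag acceleration on a body moving north at 10 m/s'''
    wind = Wind('5,90,0.1')        # 5 m/s wind toward 90 degrees, 10% turbulence
    velocity = Vector3(10, 0, 0)   # earth-frame velocity of the body, m/s
    return wind.drag(velocity, deltat=0.02)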
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-3.0 |
Changaco/oh-mainline | vendor/packages/scrapy/scrapy/item.py | 22 | 1968 | """
Scrapy Item
See documentation in docs/topics/item.rst
"""
from pprint import pformat
from UserDict import DictMixin
from scrapy.utils.trackref import object_ref
class BaseItem(object_ref):
"""Base class for all scraped items."""
pass
class Field(dict):
"""Container of field metadata"""
class ItemMeta(type):
def __new__(mcs, class_name, bases, attrs):
fields = {}
new_attrs = {}
for n, v in attrs.iteritems():
if isinstance(v, Field):
fields[n] = v
else:
new_attrs[n] = v
cls = type.__new__(mcs, class_name, bases, new_attrs)
cls.fields = cls.fields.copy()
cls.fields.update(fields)
return cls
class DictItem(DictMixin, BaseItem):
fields = {}
def __init__(self, *args, **kwargs):
self._values = {}
if args or kwargs: # avoid creating dict for most common case
for k, v in dict(*args, **kwargs).iteritems():
self[k] = v
def __getitem__(self, key):
return self._values[key]
def __setitem__(self, key, value):
if key in self.fields:
self._values[key] = value
else:
raise KeyError("%s does not support field: %s" % \
(self.__class__.__name__, key))
def __delitem__(self, key):
del self._values[key]
def __getattr__(self, name):
if name in self.fields:
raise AttributeError("Use item[%r] to get field value" % name)
raise AttributeError(name)
def __setattr__(self, name, value):
if not name.startswith('_'):
raise AttributeError("Use item[%r] = %r to set field value" % \
(name, value))
super(DictItem, self).__setattr__(name, value)
def keys(self):
return self._values.keys()
def __repr__(self):
return pformat(dict(self))
class Item(DictItem):
__metaclass__ = ItemMeta
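# Illustrative sketch, not part of the original module: declaring and using an Item
# subclass. The field names below are arbitrary example values.
class _ExampleProduct(Item):
    name = Field()
    price = Field()
def _demo_item_usage():
    product = _ExampleProduct(name='laptop')
    product['price'] = 999
    try:
        product['colour'] = 'red'  # undeclared field raises KeyError (DictItem.__setitem__)
    except KeyError:
        pass
    return dict(product)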
| agpl-3.0 |
dawran6/zulip | analytics/lib/counts.py | 5 | 16695 | from django.conf import settings
from django.db import connection, models
from django.db.models import F
from django.utils import timezone
from analytics.models import InstallationCount, RealmCount, \
UserCount, StreamCount, BaseCount, FillState, Anomaly, installation_epoch
from zerver.models import Realm, UserProfile, Message, Stream, models
from zerver.lib.timestamp import floor_to_day, floor_to_hour, ceiling_to_day, \
ceiling_to_hour
from typing import Any, Dict, Optional, Text, Tuple, Type, Union
from datetime import timedelta, datetime
import logging
import time
## Logging setup ##
log_format = '%(asctime)s %(levelname)-8s %(message)s'
logging.basicConfig(format=log_format)
formatter = logging.Formatter(log_format)
file_handler = logging.FileHandler(settings.ANALYTICS_LOG_PATH)
file_handler.setFormatter(formatter)
logger = logging.getLogger("zulip.management")
logger.setLevel(logging.INFO)
logger.addHandler(file_handler)
# First post office in Boston
MIN_TIME = datetime(1639, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
class CountStat(object):
HOUR = 'hour'
DAY = 'day'
FREQUENCIES = frozenset([HOUR, DAY])
    # Allowed intervals are HOUR, DAY, and GAUGE
GAUGE = 'gauge'
def __init__(self, property, zerver_count_query, filter_args, group_by, frequency, is_gauge):
# type: (str, ZerverCountQuery, Dict[str, bool], Optional[Tuple[models.Model, str]], str, bool) -> None
self.property = property
self.zerver_count_query = zerver_count_query
# might have to do something different for bitfields
self.filter_args = filter_args
self.group_by = group_by
if frequency not in self.FREQUENCIES:
raise AssertionError("Unknown frequency: %s" % (frequency,))
self.frequency = frequency
self.interval = self.GAUGE if is_gauge else frequency
self.is_logging = False
def __unicode__(self):
# type: () -> Text
return u"<CountStat: %s>" % (self.property,)
class LoggingCountStat(CountStat):
def __init__(self, property, analytics_table, frequency):
# type: (str, Type[BaseCount], str) -> None
CountStat.__init__(self, property, ZerverCountQuery(None, analytics_table, None), {}, None,
frequency, False)
self.is_logging = True
class ZerverCountQuery(object):
def __init__(self, zerver_table, analytics_table, query):
# type: (Type[models.Model], Type[BaseCount], Text) -> None
self.zerver_table = zerver_table
self.analytics_table = analytics_table
self.query = query
def do_update_fill_state(fill_state, end_time, state):
# type: (FillState, datetime, int) -> None
fill_state.end_time = end_time
fill_state.state = state
fill_state.save()
def process_count_stat(stat, fill_to_time):
# type: (CountStat, datetime) -> None
fill_state = FillState.objects.filter(property=stat.property).first()
if fill_state is None:
currently_filled = installation_epoch()
fill_state = FillState.objects.create(property=stat.property,
end_time=currently_filled,
state=FillState.DONE)
logger.info("INITIALIZED %s %s" % (stat.property, currently_filled))
elif fill_state.state == FillState.STARTED:
logger.info("UNDO START %s %s" % (stat.property, fill_state.end_time))
do_delete_counts_at_hour(stat, fill_state.end_time)
currently_filled = fill_state.end_time - timedelta(hours = 1)
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
logger.info("UNDO DONE %s" % (stat.property,))
elif fill_state.state == FillState.DONE:
currently_filled = fill_state.end_time
else:
raise AssertionError("Unknown value for FillState.state: %s." % (fill_state.state,))
currently_filled = currently_filled + timedelta(hours = 1)
while currently_filled <= fill_to_time:
logger.info("START %s %s %s" % (stat.property, stat.interval, currently_filled))
start = time.time()
do_update_fill_state(fill_state, currently_filled, FillState.STARTED)
do_fill_count_stat_at_hour(stat, currently_filled)
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
end = time.time()
currently_filled = currently_filled + timedelta(hours = 1)
logger.info("DONE %s %s (%dms)" % (stat.property, stat.interval, (end-start)*1000))
# We assume end_time is on an hour boundary, and is timezone aware.
# It is the caller's responsibility to enforce this!
def do_fill_count_stat_at_hour(stat, end_time):
# type: (CountStat, datetime) -> None
if stat.frequency == CountStat.DAY and (end_time != floor_to_day(end_time)):
return
if stat.interval == CountStat.HOUR:
start_time = end_time - timedelta(hours = 1)
elif stat.interval == CountStat.DAY:
start_time = end_time - timedelta(days = 1)
else: # stat.interval == CountStat.GAUGE
start_time = MIN_TIME
if not stat.is_logging:
do_pull_from_zerver(stat, start_time, end_time)
do_aggregate_to_summary_table(stat, end_time)
def do_delete_counts_at_hour(stat, end_time):
# type: (CountStat, datetime) -> None
if stat.is_logging:
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
if stat.zerver_count_query.analytics_table in [UserCount, StreamCount]:
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
else:
UserCount.objects.filter(property=stat.property, end_time=end_time).delete()
StreamCount.objects.filter(property=stat.property, end_time=end_time).delete()
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
def do_drop_all_analytics_tables():
# type: () -> None
UserCount.objects.all().delete()
StreamCount.objects.all().delete()
RealmCount.objects.all().delete()
InstallationCount.objects.all().delete()
FillState.objects.all().delete()
Anomaly.objects.all().delete()
def do_aggregate_to_summary_table(stat, end_time):
# type: (CountStat, datetime) -> None
cursor = connection.cursor()
# Aggregate into RealmCount
analytics_table = stat.zerver_count_query.analytics_table
if analytics_table in (UserCount, StreamCount):
realmcount_query = """
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, COALESCE(sum(%(analytics_table)s.value), 0), '%(property)s',
%(analytics_table)s.subgroup, %%(end_time)s
FROM zerver_realm
JOIN %(analytics_table)s
ON
zerver_realm.id = %(analytics_table)s.realm_id
WHERE
%(analytics_table)s.property = '%(property)s' AND
%(analytics_table)s.end_time = %%(end_time)s
GROUP BY zerver_realm.id, %(analytics_table)s.subgroup
""" % {'analytics_table': analytics_table._meta.db_table,
'property': stat.property}
start = time.time()
cursor.execute(realmcount_query, {'end_time': end_time})
end = time.time()
logger.info("%s RealmCount aggregation (%dms/%sr)" % (stat.property, (end-start)*1000, cursor.rowcount))
# Aggregate into InstallationCount
installationcount_query = """
INSERT INTO analytics_installationcount
(value, property, subgroup, end_time)
SELECT
sum(value), '%(property)s', analytics_realmcount.subgroup, %%(end_time)s
FROM analytics_realmcount
WHERE
property = '%(property)s' AND
end_time = %%(end_time)s
GROUP BY analytics_realmcount.subgroup
""" % {'property': stat.property}
start = time.time()
cursor.execute(installationcount_query, {'end_time': end_time})
end = time.time()
logger.info("%s InstallationCount aggregation (%dms/%sr)" % (stat.property, (end-start)*1000, cursor.rowcount))
cursor.close()
# This is the only method that hits the prod databases directly.
def do_pull_from_zerver(stat, start_time, end_time):
# type: (CountStat, datetime, datetime) -> None
zerver_table = stat.zerver_count_query.zerver_table._meta.db_table # type: ignore
join_args = ' '.join('AND %s.%s = %s' % (zerver_table, key, value)
for key, value in stat.filter_args.items())
if stat.group_by is None:
subgroup = 'NULL'
group_by_clause = ''
else:
subgroup = '%s.%s' % (stat.group_by[0]._meta.db_table, stat.group_by[1])
group_by_clause = ', ' + subgroup
# We do string replacement here because passing join_args as a param
# may result in problems when running cursor.execute; we do
    # the string formatting beforehand so that cursor.execute runs it as SQL
query_ = stat.zerver_count_query.query % {'zerver_table': zerver_table,
'property': stat.property,
'join_args': join_args,
'subgroup': subgroup,
'group_by_clause': group_by_clause}
cursor = connection.cursor()
start = time.time()
cursor.execute(query_, {'time_start': start_time, 'time_end': end_time})
end = time.time()
logger.info("%s do_pull_from_zerver (%dms/%sr)" % (stat.property, (end-start)*1000, cursor.rowcount))
cursor.close()
# called from zerver/lib/actions.py; should not throw any errors
def do_increment_logging_stat(zerver_object, stat, subgroup, event_time, increment=1):
# type: (Union[Realm, UserProfile, Stream], CountStat, Optional[Union[str, int, bool]], datetime, int) -> None
table = stat.zerver_count_query.analytics_table
if table == RealmCount:
id_args = {'realm': zerver_object}
elif table == UserCount:
id_args = {'realm': zerver_object.realm, 'user': zerver_object}
else: # StreamCount
id_args = {'realm': zerver_object.realm, 'stream': zerver_object}
if stat.frequency == CountStat.DAY:
end_time = ceiling_to_day(event_time)
else: # CountStat.HOUR:
end_time = ceiling_to_hour(event_time)
row, created = table.objects.get_or_create(
property=stat.property, subgroup=subgroup, end_time=end_time,
defaults={'value': increment}, **id_args)
if not created:
row.value = F('value') + increment
row.save(update_fields=['value'])
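# Illustrative sketch, not part of the original module: the call pattern used by
# callers such as zerver/lib/actions.py for a logging stat. The user profile and
# event time arguments are placeholders supplied by the caller.
def _demo_log_active_user(user_profile, event_time):
    # type: (UserProfile, datetime) -> None
    stat = COUNT_STATS['active_users_log:is_bot:day']
    do_increment_logging_stat(user_profile.realm, stat, user_profile.is_bot, event_time)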
count_user_by_realm_query = """
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, count(%(zerver_table)s),'%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_realm
JOIN zerver_userprofile
ON
zerver_realm.id = zerver_userprofile.realm_id
WHERE
zerver_realm.date_created < %%(time_end)s AND
zerver_userprofile.date_joined >= %%(time_start)s AND
zerver_userprofile.date_joined < %%(time_end)s
%(join_args)s
GROUP BY zerver_realm.id %(group_by_clause)s
"""
zerver_count_user_by_realm = ZerverCountQuery(UserProfile, RealmCount, count_user_by_realm_query)
# Currently, .sender_id is the only Message-specific thing.
count_message_by_user_query = """
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_userprofile.id, zerver_userprofile.realm_id, count(*), '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_userprofile
JOIN zerver_message
ON
zerver_userprofile.id = zerver_message.sender_id
WHERE
zerver_userprofile.date_joined < %%(time_end)s AND
zerver_message.pub_date >= %%(time_start)s AND
zerver_message.pub_date < %%(time_end)s
%(join_args)s
GROUP BY zerver_userprofile.id %(group_by_clause)s
"""
zerver_count_message_by_user = ZerverCountQuery(Message, UserCount, count_message_by_user_query)
# Currently unused and untested
count_stream_by_realm_query = """
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, count(*), '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_realm
JOIN zerver_stream
ON
zerver_realm.id = zerver_stream.realm_id AND
WHERE
zerver_realm.date_created < %%(time_end)s AND
zerver_stream.date_created >= %%(time_start)s AND
zerver_stream.date_created < %%(time_end)s
%(join_args)s
GROUP BY zerver_realm.id %(group_by_clause)s
"""
zerver_count_stream_by_realm = ZerverCountQuery(Stream, RealmCount, count_stream_by_realm_query)
# This query violates the count_X_by_Y_query conventions in several ways. One,
# the X table is not specified by the query name; MessageType is not a zerver
# table. Two, it ignores the subgroup column in the CountStat object; instead,
# it uses 'message_type' from the subquery to fill in the subgroup column.
count_message_type_by_user_query = """
INSERT INTO analytics_usercount
(realm_id, user_id, value, property, subgroup, end_time)
SELECT realm_id, id, SUM(count) AS value, '%(property)s', message_type, %%(time_end)s
FROM
(
SELECT zerver_userprofile.realm_id, zerver_userprofile.id, count(*),
CASE WHEN
zerver_recipient.type != 2 THEN 'private_message'
WHEN
zerver_stream.invite_only = TRUE THEN 'private_stream'
ELSE 'public_stream'
END
message_type
FROM zerver_userprofile
JOIN zerver_message
ON
zerver_userprofile.id = zerver_message.sender_id AND
zerver_message.pub_date >= %%(time_start)s AND
zerver_message.pub_date < %%(time_end)s
%(join_args)s
JOIN zerver_recipient
ON
zerver_message.recipient_id = zerver_recipient.id
LEFT JOIN zerver_stream
ON
zerver_recipient.type_id = zerver_stream.id
GROUP BY zerver_userprofile.realm_id, zerver_userprofile.id, zerver_recipient.type, zerver_stream.invite_only
) AS subquery
GROUP BY realm_id, id, message_type
"""
zerver_count_message_type_by_user = ZerverCountQuery(Message, UserCount, count_message_type_by_user_query)
# Note that this query also joins to the UserProfile table, since all
# current queries that use this also subgroup on UserProfile.is_bot. If in
# the future there is a query that counts messages by stream and doesn't need
# the UserProfile table, consider writing a new query for efficiency.
count_message_by_stream_query = """
INSERT INTO analytics_streamcount
(stream_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_stream.id, zerver_stream.realm_id, count(*), '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_stream
JOIN zerver_recipient
ON
zerver_stream.id = zerver_recipient.type_id
JOIN zerver_message
ON
zerver_recipient.id = zerver_message.recipient_id
JOIN zerver_userprofile
ON
zerver_message.sender_id = zerver_userprofile.id
WHERE
zerver_stream.date_created < %%(time_end)s AND
zerver_recipient.type = 2 AND
zerver_message.pub_date >= %%(time_start)s AND
zerver_message.pub_date < %%(time_end)s
%(join_args)s
GROUP BY zerver_stream.id %(group_by_clause)s
"""
zerver_count_message_by_stream = ZerverCountQuery(Message, StreamCount, count_message_by_stream_query)
count_stats_ = [
CountStat('active_users:is_bot:day', zerver_count_user_by_realm, {'is_active': True},
(UserProfile, 'is_bot'), CountStat.DAY, True),
CountStat('messages_sent:is_bot:hour', zerver_count_message_by_user, {},
(UserProfile, 'is_bot'), CountStat.HOUR, False),
CountStat('messages_sent:message_type:day', zerver_count_message_type_by_user, {},
None, CountStat.DAY, False),
CountStat('messages_sent:client:day', zerver_count_message_by_user, {},
(Message, 'sending_client_id'), CountStat.DAY, False),
CountStat('messages_in_stream:is_bot:day', zerver_count_message_by_stream, {},
(UserProfile, 'is_bot'), CountStat.DAY, False),
LoggingCountStat('active_users_log:is_bot:day', RealmCount, CountStat.DAY),
]
COUNT_STATS = {stat.property: stat for stat in count_stats_}
| apache-2.0 |
exelearning/iteexe | twisted/persisted/marmalade.py | 13 | 16145 | # -*- test-case-name: twisted.test.test_persisted -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Marmalade: jelly, with just a hint of bitterness.
I can serialize a Python object to an XML DOM tree (twisted.web.microdom), and
therefore to XML data, similarly to twisted.spread.jelly. Because both Python
lists and DOM trees are tree data-structures, many of the idioms used here are
identical.
"""
#import warnings
#warnings.warn("twisted.persisted.marmalade is deprecated", DeprecationWarning, stacklevel=2)
import new
from twisted.python.reflect import namedModule, namedClass, namedObject, fullFuncName, qual
from twisted.persisted.crefutil import NotKnown, _Tuple, _InstanceMethod, _DictKeyAndValue, _Dereference, _Defer
from twisted.spread.jelly import _newInstance
try:
from new import instancemethod
except:
from org.python.core import PyMethod
instancemethod = PyMethod
import types
import copy_reg
#for some reason, __builtins__ == __builtin__.__dict__ in the context where this is used.
#Can someone tell me why?
import __builtin__
def instance(klass, d):
if isinstance(klass, types.ClassType):
return new.instance(klass, d)
elif isinstance(klass, type):
o = object.__new__(klass)
o.__dict__ = d
return o
else:
raise TypeError, "%s is not a class" % klass
def getValueElement(node):
"""Get the one child element of a given element.
If there is more than one child element, raises ValueError. Otherwise,
returns the value element.
"""
valueNode = None
for subnode in node.childNodes:
if isinstance(subnode, Element):
if valueNode is None:
valueNode = subnode
else:
raise ValueError("Only one value node allowed per instance!")
return valueNode
class DOMJellyable:
jellyDOMVersion = 1
def jellyToDOM(self, jellier, element):
element.setAttribute("marmalade:version", str(self.jellyDOMVersion))
method = getattr(self, "jellyToDOM_%s" % self.jellyDOMVersion, None)
if method:
method(jellier, element)
else:
element.appendChild(jellier.jellyToNode(self.__dict__))
def unjellyFromDOM(self, unjellier, element):
pDOMVersion = element.getAttribute("marmalade:version") or "0"
method = getattr(self, "unjellyFromDOM_%s" % pDOMVersion, None)
if method:
method(unjellier, element)
else:
            # Fall back to unjellying the instance state through the unjellier
            # (the original line referenced an undefined 'node' and a
            # non-existent self.unjellyNode).
            state = unjellier.unjellyNode(getValueElement(element))
if hasattr(self.__class__, "__setstate__"):
self.__setstate__(state)
else:
self.__dict__ = state
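# Hedged illustration (added; not from the original source): a class can customize
# its serialization by subclassing DOMJellyable and defining versioned hook methods.
# The names below follow the "jellyToDOM_<version>"/"unjellyFromDOM_<version>"
# dispatch used above; everything else about the class is made up.
#
#     class Point(DOMJellyable):
#         jellyDOMVersion = 1
#         def jellyToDOM_1(self, jellier, element):
#             element.setAttribute("x", str(self.x))
#             element.setAttribute("y", str(self.y))
#         def unjellyFromDOM_1(self, unjellier, element):
#             self.x = int(element.getAttribute("x"))
#             self.y = int(element.getAttribute("y"))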
class DOMUnjellier:
def __init__(self):
self.references = {}
self._savedLater = []
def unjellyLater(self, node):
"""Unjelly a node, later.
"""
d = _Defer()
self.unjellyInto(d, 0, node)
self._savedLater.append(d)
return d
def unjellyInto(self, obj, loc, node):
"""Utility method for unjellying one object into another.
This automates the handling of backreferences.
"""
o = self.unjellyNode(node)
obj[loc] = o
if isinstance(o, NotKnown):
o.addDependant(obj, loc)
return o
def unjellyAttribute(self, instance, attrName, valueNode):
"""Utility method for unjellying into instances of attributes.
Use this rather than unjellyNode unless you like surprising bugs!
Alternatively, you can use unjellyInto on your instance's __dict__.
"""
self.unjellyInto(instance.__dict__, attrName, valueNode)
def unjellyNode(self, node):
if node.tagName.lower() == "none":
retval = None
elif node.tagName == "string":
# XXX FIXME this is obviously insecure
# if you doubt:
# >>> unjellyFromXML('''<string value="h"+str(__import__("sys"))+"i" />''')
# "h<module 'sys' (built-in)>i"
retval = str(eval('"%s"' % node.getAttribute("value")))
elif node.tagName == "int":
retval = int(node.getAttribute("value"))
elif node.tagName == "float":
retval = float(node.getAttribute("value"))
elif node.tagName == "longint":
retval = long(node.getAttribute("value"))
elif node.tagName == "bool":
retval = int(node.getAttribute("value"))
if retval:
retval = True
else:
retval = False
elif node.tagName == "module":
retval = namedModule(str(node.getAttribute("name")))
elif node.tagName == "class":
retval = namedClass(str(node.getAttribute("name")))
elif node.tagName == "unicode":
retval = unicode(str(node.getAttribute("value")).replace("\\n", "\n").replace("\\t", "\t"), "raw_unicode_escape")
elif node.tagName == "function":
retval = namedObject(str(node.getAttribute("name")))
elif node.tagName == "method":
im_name = node.getAttribute("name")
im_class = namedClass(node.getAttribute("class"))
im_self = self.unjellyNode(getValueElement(node))
if im_class.__dict__.has_key(im_name):
if im_self is None:
retval = getattr(im_class, im_name)
elif isinstance(im_self, NotKnown):
retval = _InstanceMethod(im_name, im_self, im_class)
else:
retval = instancemethod(im_class.__dict__[im_name],
im_self,
im_class)
else:
                raise Exception("instance method changed")
elif node.tagName == "tuple":
l = []
tupFunc = tuple
for subnode in node.childNodes:
if isinstance(subnode, Element):
l.append(None)
if isinstance(self.unjellyInto(l, len(l)-1, subnode), NotKnown):
tupFunc = _Tuple
retval = tupFunc(l)
elif node.tagName == "list":
l = []
finished = 1
for subnode in node.childNodes:
if isinstance(subnode, Element):
l.append(None)
self.unjellyInto(l, len(l)-1, subnode)
retval = l
elif node.tagName == "dictionary":
d = {}
keyMode = 1
for subnode in node.childNodes:
if isinstance(subnode, Element):
if keyMode:
kvd = _DictKeyAndValue(d)
if not subnode.getAttribute("role") == "key":
                            raise Exception("Unjellying Error: key role not set")
self.unjellyInto(kvd, 0, subnode)
else:
self.unjellyInto(kvd, 1, subnode)
keyMode = not keyMode
retval = d
elif node.tagName == "instance":
className = node.getAttribute("class")
clasz = namedClass(className)
if issubclass(clasz, DOMJellyable):
retval = _newInstance(clasz, {})
retval.unjellyFromDOM(self, node)
else:
state = self.unjellyNode(getValueElement(node))
if hasattr(clasz, "__setstate__"):
inst = _newInstance(clasz, {})
inst.__setstate__(state)
else:
inst = _newInstance(clasz, state)
retval = inst
elif node.tagName == "reference":
refkey = node.getAttribute("key")
retval = self.references.get(refkey)
if retval is None:
der = _Dereference(refkey)
self.references[refkey] = der
retval = der
elif node.tagName == "copyreg":
nodefunc = namedObject(node.getAttribute("loadfunc"))
loaddef = self.unjellyLater(getValueElement(node)).addCallback(
lambda result, _l: apply(_l, result), nodefunc)
retval = loaddef
else:
            raise Exception("Unsupported Node Type: %s" % str(node.tagName))
if node.hasAttribute("reference"):
refkey = node.getAttribute("reference")
ref = self.references.get(refkey)
if ref is None:
self.references[refkey] = retval
elif isinstance(ref, NotKnown):
ref.resolveDependants(retval)
self.references[refkey] = retval
else:
assert 0, "Multiple references with the same ID!"
return retval
def unjelly(self, doc):
l = [None]
self.unjellyInto(l, 0, doc.childNodes[0])
for svd in self._savedLater:
svd.unpause()
return l[0]
class DOMJellier:
def __init__(self):
# dict of {id(obj): (obj, node)}
self.prepared = {}
self.document = Document()
self._ref_id = 0
def prepareElement(self, element, object):
self.prepared[id(object)] = (object, element)
def jellyToNode(self, obj):
"""Create a node representing the given object and return it.
"""
objType = type(obj)
#immutable (We don't care if these have multiple refs)
if objType is types.NoneType:
node = self.document.createElement("None")
elif objType is types.StringType:
node = self.document.createElement("string")
r = repr(obj)
if r[0] == '"':
r = r.replace("'", "\\'")
else:
r = r.replace('"', '\\"')
node.setAttribute("value", r[1:-1])
# node.appendChild(CDATASection(obj))
elif objType is types.IntType:
node = self.document.createElement("int")
node.setAttribute("value", str(obj))
elif objType is types.LongType:
node = self.document.createElement("longint")
s = str(obj)
if s[-1] == 'L':
s = s[:-1]
node.setAttribute("value", s)
elif objType is types.FloatType:
node = self.document.createElement("float")
node.setAttribute("value", repr(obj))
elif objType is types.MethodType:
node = self.document.createElement("method")
node.setAttribute("name", obj.im_func.__name__)
node.setAttribute("class", qual(obj.im_class))
# TODO: make methods 'prefer' not to jelly the object internally,
# so that the object will show up where it's referenced first NOT
# by a method.
node.appendChild(self.jellyToNode(obj.im_self))
elif hasattr(types, 'BooleanType') and objType is types.BooleanType:
node = self.document.createElement("bool")
node.setAttribute("value", str(int(obj)))
elif objType is types.ModuleType:
node = self.document.createElement("module")
node.setAttribute("name", obj.__name__)
elif objType==types.ClassType or issubclass(objType, type):
node = self.document.createElement("class")
node.setAttribute("name", qual(obj))
elif objType is types.UnicodeType:
node = self.document.createElement("unicode")
obj = obj.encode('raw_unicode_escape')
s = obj.replace("\n", "\\n").replace("\t", "\\t")
node.setAttribute("value", s)
elif objType in (types.FunctionType, types.BuiltinFunctionType):
# TODO: beat pickle at its own game, and do BuiltinFunctionType
# separately, looking for __self__ attribute and unpickling methods
# of C objects when possible.
node = self.document.createElement("function")
node.setAttribute("name", fullFuncName(obj))
else:
#mutable!
if self.prepared.has_key(id(obj)):
oldNode = self.prepared[id(obj)][1]
if oldNode.hasAttribute("reference"):
# it's been referenced already
key = oldNode.getAttribute("reference")
else:
# it hasn't been referenced yet
self._ref_id = self._ref_id + 1
key = str(self._ref_id)
oldNode.setAttribute("reference", key)
node = self.document.createElement("reference")
node.setAttribute("key", key)
return node
node = self.document.createElement("UNNAMED")
self.prepareElement(node, obj)
if objType is types.ListType:
node.tagName = "list"
for subobj in obj:
node.appendChild(self.jellyToNode(subobj))
elif objType is types.TupleType:
node.tagName = "tuple"
for subobj in obj:
node.appendChild(self.jellyToNode(subobj))
elif objType is types.DictionaryType:
node.tagName = "dictionary"
for k, v in obj.items():
n = self.jellyToNode(k)
n.setAttribute("role", "key")
n2 = self.jellyToNode(v)
node.appendChild(n)
node.appendChild(n2)
elif copy_reg.dispatch_table.has_key(objType):
unpickleFunc, state = copy_reg.dispatch_table[objType](obj)
node = self.document.createElement("copyreg")
# node.setAttribute("type", objType.__name__)
node.setAttribute("loadfunc", fullFuncName(unpickleFunc))
node.appendChild(self.jellyToNode(state))
elif objType is types.InstanceType or hasattr(objType, "__module__"):
className = qual(obj.__class__)
node.tagName = "instance"
node.setAttribute("class", className)
if isinstance(obj, DOMJellyable):
obj.jellyToDOM(self, node)
else:
if hasattr(obj, "__getstate__"):
state = obj.__getstate__()
else:
state = obj.__dict__
n = self.jellyToNode(state)
node.appendChild(n)
else:
                raise Exception("Unsupported type: %s" % objType.__name__)
return node
def jelly(self, obj):
"""Create a document representing the current object, and return it.
"""
node = self.jellyToNode(obj)
self.document.appendChild(node)
return self.document
def jellyToDOM(object):
"""Convert an Object into an twisted.web.microdom.Document.
"""
dj = DOMJellier()
document = dj.jelly(object)
return document
def unjellyFromDOM(document):
"""Convert an twisted.web.microdom.Document into a Python object.
"""
du = DOMUnjellier()
return du.unjelly(document)
def jellyToXML(object, file=None):
"""jellyToXML(object, [file]) -> None | string
Converts a Python object to an XML stream. If you pass a file, the XML
will be written to that file; otherwise, a string of the XML will be
returned.
"""
document = jellyToDOM(object)
if file:
document.writexml(file, "", " ", "\n")
else:
return document.toprettyxml(" ", "\n")
def unjellyFromXML(stringOrFile):
"""I convert a string or the contents of an XML file into a Python object.
"""
if hasattr(stringOrFile, "read"):
document = parse(stringOrFile)
else:
document = parseString(stringOrFile)
return unjellyFromDOM(document)
from twisted.web.microdom import Element, Document, parse, parseString, NodeList
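# A minimal round-trip sketch (added; not part of the original module), assuming
# Twisted and twisted.web.microdom are importable. It only exercises the public
# helpers defined above.
if __name__ == '__main__':
    # Serialize a plain Python structure to XML text and read it back.
    original = {'numbers': [1, 2, 3], 'label': u'demo'}
    xml = jellyToXML(original)
    restored = unjellyFromXML(xml)
    print xml
    print restored == original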
| gpl-2.0 |
imiolek-ireneusz/pysiogame | i18n/custom/word_lists/pl_di.py | 1 | 12830 | # -*- coding: utf-8 -*-
# This is a list of words used by the word builder and word maze games, and possibly
# other games built in the future.
# The words are mainly the most commonly used English words, plus some others.
# In each sub-list of di, the first element is the number of words in that sub-list,
# stored up front to avoid counting them every time the list is selected.
# The sub-lists consist of words with len() of 3 - 10.
# The recommended way to internationalize this is to create a new list of the words
# most commonly used in your language rather than translating this one.
# This list is a direct translation from English made with Google Translate - duplicates
# and untranslated words were removed, and the result was re-sorted.
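# A quick illustration of the layout (added, not part of the original data): di[i]
# holds words of length i + 3, and di[i][0] is the declared word count, so for
# example di[0][0] should equal len(di[0]) - 1 and di[0][1:] are the three-letter words.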
di = [
[61, 'akt', 'ale', 'ani', 'bez', 'boi', 'być', 'coś', 'cud', 'czy', 'dać', 'dał', 'dno', 'dom', 'dwa', 'dym', 'dół',
'gaz', 'gra', 'ich', 'jak', 'jej', 'już', 'jęk', 'koc', 'kot', 'koń', 'kto', 'kup', 'kąt', 'luz', 'lód', 'maj',
'moc', 'mój', 'nic', 'nie', 'nić', 'niż', 'nos', 'ona', 'oni', 'pan', 'pas', 'pić', 'pod', 'raz', 'rok', 'ryk',
'róg', 'sam', 'sen', 'się', 'snu', 'sos', 'sto', 'syn', 'sęk', 'tak', 'typ', 'wąż', 'źle']
, [128, 'albo', 'baza', 'blef', 'blok', 'brać', 'brąz', 'buty', 'było', 'były', 'cała', 'cena', 'chcę', 'cios',
'ciąg', 'czas', 'czuć', 'czuł', 'dość', 'drut', 'duch', 'duma', 'dużo', 'duży', 'dzyń', 'fakt', 'fale', 'figa',
'flet', 'grać', 'góry', 'głos', 'inne', 'inny', 'jego', 'jeść', 'klan', 'klej', 'klif', 'klin', 'klub', 'koło',
'krab', 'kraj', 'krok', 'król', 'ktoś', 'kula', 'kurs', 'lasu', 'lato', 'lewo', 'linę', 'mapa', 'małe', 'mało',
'mały', 'małż', 'miał', 'mieć', 'mila', 'miły', 'mowa', 'może', 'mrok', 'mróz', 'musi', 'mąka', 'nici', 'noga',
'nogi', 'nowy', 'obcy', 'obok', 'oczy', 'ogon', 'okno', 'opis', 'owoc', 'ołów', 'padł', 'pani', 'para', 'pies',
'pisk', 'pięć', 'pled', 'poza', 'puch', 'pług', 'rama', 'rano', 'rasa', 'ruch', 'rura', 'ryba', 'rytm', 'ręce',
'ręka', 'sens', 'siła', 'skok', 'skos', 'spać', 'stać', 'suma', 'słup', 'taca', 'temu', 'tlen', 'trop', 'twój',
'tępy', 'tłum', 'usta', 'uszy', 'użyj', 'wada', 'waga', 'wiek', 'woda', 'wzór', 'zima', 'znak', 'złom', 'łódź',
'żaba', 'żona', 'żywo']
, [219, 'akcja', 'biały', 'blask', 'brzęk', 'bęben', 'błoto', 'błysk', 'cegły', 'chleb', 'chwyt', 'ciało', 'cichy',
'cyfra', 'czaru', 'część', 'deska', 'dobry', 'dodaj', 'dolar', 'dowód', 'droga', 'drzwi', 'dumny', 'dusić',
'dzień', 'dziki', 'dzwon', 'długi', 'efekt', 'ekran', 'farba', 'firma', 'flaga', 'flota', 'gdzie', 'glina',
'gracz', 'grupa', 'grupy', 'grypa', 'gąbka', 'głowa', 'głowy', 'ilość', 'iskra', 'jajka', 'jasne', 'jasny',
'jazda', 'jeden', 'język', 'każdy', 'kiedy', 'kilka', 'klapa', 'klasa', 'klaun', 'klucz', 'kolor', 'kopia',
'koszt', 'końcu', 'kości', 'krowy', 'krzyk', 'krzyż', 'kupił', 'kwiat', 'kółka', 'latać', 'linia', 'marka',
'matka', 'mazać', 'minut', 'mięso', 'mleko', 'mniej', 'morze', 'mówić', 'mądry', 'młody', 'nagle', 'napar',
'naród', 'nasze', 'nauka', 'nawet', 'nazwa', 'niebo', 'niech', 'nigdy', 'niska', 'nocne', 'nosić', 'nosze',
'obraz', 'obrót', 'ogród', 'około', 'osiem', 'osoba', 'ostry', 'owady', 'palce', 'panna', 'parku', 'pazur',
'pchła', 'pewne', 'pełny', 'piegi', 'pieśń', 'pisze', 'plama', 'pobyt', 'pokój', 'potem', 'praca', 'prasa',
'prawo', 'przed', 'przód', 'ptaki', 'punkt', 'puste', 'płyta', 'ramię', 'razem', 'rondo', 'rosną', 'rosła',
'ruszt', 'rzeka', 'równy', 'różne', 'rękaw', 'sanki', 'serce', 'silny', 'skala', 'skała', 'sklep', 'skóry',
'skąpy', 'smuga', 'sobie', 'sonda', 'stado', 'stary', 'stała', 'stało', 'stały', 'suchy', 'szafa', 'szare',
'szary', 'szefa', 'sześć', 'szkic', 'szkło', 'szyna', 'słaby', 'słowa', 'takie', 'temat', 'teraz', 'trawa',
'tulić', 'tutaj', 'tułów', 'twarz', 'tylko', 'uczyć', 'ulewa', 'ulica', 'umysł', 'urwis', 'urząd', 'uwaga',
'uważa', 'ważny', 'wiatr', 'widok', 'wiele', 'wieża', 'winić', 'wolny', 'wrona', 'wujek', 'wybór', 'wydać',
'wynik', 'wypas', 'wyspa', 'wziął', 'wzrok', 'włosy', 'zapis', 'zboże', 'zegar', 'ziemi', 'zimno', 'znany',
'zwany', 'ząbek', 'złoty', 'łaska', 'łatwy', 'łyżkę', 'łyżwy', 'ślepy', 'śmiał', 'śnieg', 'śruba', 'świat',
'żuraw', 'życie', 'żółty']
,
[227, 'bardzo', 'biedny', 'biznes', 'blisko', 'bluzka', 'bogaty', 'bulion', 'będzie', 'chcieć', 'chmura', 'chusta',
'chwała', 'chylić', 'ciemny', 'cienki', 'ciepło', 'ciężki', 'czarne', 'czarny', 'czekać', 'cztery', 'czysty',
'czytać', 'często', 'daleko', 'deszcz', 'dobrze', 'dolina', 'dostał', 'drewno', 'drzewa', 'drzewo', 'dzieci',
'dziura', 'dziwne', 'dzięki', 'dźwięk', 'gorąco', 'gotowy', 'groszy', 'gładki', 'głośny', 'główny', 'jabłko',
'jednak', 'kaczor', 'kapłan', 'klamra', 'klepać', 'klimat', 'koniec', 'korona', 'korzeń', 'krakać', 'kryzys',
'krzyża', 'krótki', 'książę', 'kwiaty', 'lekarz', 'lekkie', 'lepiej', 'lepkie', 'liczba', 'liczyć', 'litery',
'ludzie', 'ludzki', 'marzec', 'metoda', 'miasto', 'miotłę', 'miękki', 'miłość', 'modlić', 'muzyka', 'myślał',
'należą', 'napisz', 'obecny', 'obiekt', 'obszar', 'oddech', 'odkrył', 'odpływ', 'odzież', 'ogólne', 'ojciec',
'opieka', 'osiadł', 'osobny', 'papier', 'partia', 'parzyć', 'pastel', 'pasuje', 'pewien', 'pełzać', 'piękne',
'piętro', 'plemię', 'plytki', 'podróż', 'poemat', 'pogoda', 'pomnik', 'ponury', 'postać', 'poszło', 'powoli',
'powrót', 'powódż', 'poziom', 'połysk', 'pranie', 'prawda', 'prawie', 'precel', 'proces', 'prosty', 'proszę',
'przodu', 'pstrąg', 'północ', 'płacić', 'płaska', 'płatki', 'płynna', 'pływak', 'pływać', 'pływał', 'raczej',
'raport', 'reszta', 'rodzaj', 'rozmaz', 'rządów', 'sekcja', 'siebie', 'siedem', 'silnik', 'skleić', 'skrypt',
'skuter', 'spacer', 'spadek', 'spisek', 'sportu', 'stacja', 'statek', 'strach', 'strajk', 'strona', 'strony',
'surowe', 'sweter', 'sylaby', 'szalik', 'szansa', 'szkoła', 'szopka', 'szpieg', 'sztuka', 'szybko', 'sądzić',
'sławny', 'służyć', 'taniec', 'trochę', 'trudny', 'trzask', 'trzeba', 'trąbka', 'twardy', 'upadek', 'uprawy',
'użycia', 'wielki', 'wioska', 'wiosna', 'więcej', 'wojsko', 'wschód', 'wspiął', 'wszedł', 'wydaje', 'wykres',
'wyrwać', 'wysoki', 'wyślij', 'wzrost', 'wąchać', 'własna', 'zabawa', 'zachód', 'zacisk', 'zaczął', 'zaimek',
'zajęty', 'zakres', 'zapach', 'zastaw', 'zawsze', 'zdanie', 'zespół', 'zestaw', 'zgrzyt', 'ziarno', 'ziemia',
'zmiana', 'zrobić', 'zrzęda', 'zwykle', 'zwykły', 'złamać', 'złamał', 'złapać', 'łabędź', 'ślimak', 'śliska',
'śliwka', 'śmigło', 'śpiący', 'środka', 'świerk', 'świeże', 'żelazo']
,
[177, 'bawełna', 'bodziec', 'brązowy', 'budować', 'budynek', 'cebulka', 'centrum', 'chociaż', 'chodził', 'chrapać',
'chronić', 'cieszyć', 'czasami', 'czaszka', 'dodatek', 'dostawa', 'dotacja', 'dziecko', 'dziobek', 'długość',
'ekspres', 'energii', 'fabryki', 'frakcja', 'gałązka', 'gniazdo', 'godzina', 'gwiazda', 'hamulec', 'hodowli',
'interes', 'jeszcze', 'jezioro', 'kapitan', 'kapitał', 'kobiece', 'kobieta', 'kobiety', 'kolumna', 'komórki',
'kołysać', 'kołyski', 'kościół', 'krawędź', 'krwawić', 'książka', 'księżyc', 'kształt', 'których', 'kucharz',
'latarka', 'maleńka', 'maszynę', 'materia', 'melodia', 'miejsce', 'miesiąc', 'mnóstwo', 'mosiądz', 'możliwe',
'nagroda', 'napawać', 'nasiona', 'odcinek', 'oddział', 'odległy', 'odmiana', 'odważny', 'odziany', 'oglądać',
'okruchy', 'okrągły', 'ostatni', 'oznacza', 'patelni', 'piszczą', 'planeta', 'planety', 'plaster', 'podczas',
'podmuch', 'podobny', 'podpiec', 'podróże', 'podskok', 'podział', 'pokazać', 'pokryty', 'pominąć', 'poniżej',
'posypać', 'poważne', 'powiedz', 'powolny', 'powyżej', 'położyć', 'poślizg', 'produkt', 'projekt', 'przeciw',
'przyjść', 'pułapka', 'pułapki', 'pytania', 'pytanie', 'później', 'płaszcz', 'płomień', 'rodzina', 'rolnicy',
'rozmiar', 'roślina', 'rośliny', 'rysunek', 'również', 'równiny', 'różnica', 'samolot', 'sekunda', 'siostra',
'skoczył', 'skorupa', 'skrobia', 'skręcać', 'spotkać', 'sprawdź', 'stadion', 'stracił', 'strumyk', 'symbole',
'szalony', 'szczery', 'szeroka', 'szeroki', 'szmatki', 'słownik', 'słuchać', 'słyszeć', 'tablica', 'trzpień',
'trzymaj', 'trzymać', 'trójkąt', 'tydzień', 'tysiące', 'tłuszcz', 'uczucie', 'uruchom', 'ustalić', 'usterka',
'uśmiech', 'warkocz', 'wartość', 'warunki', 'wieczny', 'wieczór', 'wkrótce', 'wskazać', 'wspólny', 'wtyczka',
'wymawia', 'wysłane', 'wzgórze', 'wędrują', 'zamiast', 'zarówno', 'zawiera', 'zgadnąć', 'zielony', 'zjechał',
'znaleźć', 'związek', 'złamane', 'ścianie', 'średnim', 'światło', 'żołądek']
, [113, 'aktualny', 'artykuły', 'bazgroły', 'chłopiec', 'czerwony', 'czynniki', 'człowiek', 'dlaczego', 'dmuchnął',
'dołączył', 'durszlak', 'dyskusja', 'dziesięć', 'elementy', 'gdakanie', 'głębokie', 'historia', 'jaskółka',
'jedzenie', 'kapelusz', 'kichanie', 'kierunek', 'kontrola', 'krzemień', 'krzyknął', 'metalowe', 'modlitwa',
'muszelka', 'nadzieja', 'najmniej', 'napisane', 'naprawdę', 'następny', 'naukowcy', 'niektóre', 'niszczyć',
'oddziały', 'odwrotny', 'osiągnął', 'oszustem', 'otrzymał', 'pamiętam', 'pantofel', 'paragraf', 'pierwszy',
'pochodni', 'początek', 'pojechał', 'pokazane', 'poleciał', 'pomiędzy', 'ponieważ', 'poprawny', 'poruszyć',
'powinien', 'powtarza', 'południe', 'praktyka', 'produkty', 'profesor', 'przemysł', 'przepływ', 'przykład',
'prędkość', 'pustynia', 'północne', 'płyniemy', 'równanie', 'samochód', 'siedział', 'siedziba', 'siedzieć',
'skrzydeł', 'sprzedać', 'sprzęgło', 'spódnica', 'stabilny', 'stocznia', 'straszyć', 'strumień', 'strzałka',
'strzemię', 'studenci', 'sukienka', 'szczotka', 'szczupły', 'szprychy', 'towarowe', 'urodzony', 'urzędnik',
'wcześnie', 'wewnątrz', 'wiedział', 'wiedzieć', 'wszystko', 'wybrzeże', 'wydawało', 'wyjaśnić', 'wypalone',
'wzrośnie', 'własność', 'zachodni', 'zaklęcia', 'zapasowy', 'zaufanie', 'zdzierać', 'zlepiają', 'znajduje',
'zobaczyć', 'zostawić', 'zraszacz', 'zszywacz', 'związany']
, [77, 'czasownik', 'dokładnie', 'dopasować', 'dorosłego', 'drukowane', 'elegancki', 'jednostka', 'kiełkować',
'kompletna', 'konieczne', 'kroplówka', 'kręgosłup', 'mieszanki', 'możliwość', 'najlepszy', 'naturalny',
'niebieski', 'obietnica', 'oczekiwać', 'odpowiedź', 'odwiedzić', 'ostrożnie', 'pieniądze', 'pierścień',
'pociągnął', 'polowanie', 'potomstwa', 'powietrze', 'powitalny', 'pozostaje', 'prezydent', 'produkcji',
'prowadził', 'przeszedł', 'przyczyna', 'przynieść', 'przyniósł', 'przypadek', 'przyszedł', 'przytulny',
'pęknięcie', 'rozumiesz', 'rozwiązać', 'rumieniec', 'specjalny', 'spożywcze', 'szczegóły', 'szczelina',
'szlifować', 'szorstkim', 'słodzenia', 'słyszałem', 'truskawka', 'uderzenie', 'udowodnić', 'warczenie',
'wciśnięty', 'widziałem', 'wiewiórka', 'winogrona', 'winogrono', 'większość', 'wspaniały', 'wyciągnąć',
'wyciągnął', 'wyrażenie', 'zamrożone', 'zapowiedź', 'zbudowany', 'zderzenie', 'zniesiony', 'zszywanie',
'zwierzęta', 'zwłaszcza', 'ćwiczenia', 'śpiewania', 'żołnierzy']
,
[57, 'bezpieczny', 'ciężarówka', 'cząsteczki', 'członkowie', 'deklaracja', 'dziesiętny', 'dziewczyny', 'energiczny',
'faktycznie', 'kliknięcie', 'kwadratowa', 'materiałem', 'nakrzyczeć', 'nauczyciel', 'niedźwiedź', 'nowoczesny',
'obserwować', 'podzielone', 'porównanie', 'powiedział', 'powiedzieć', 'pozdrawiam', 'przeglądać', 'przestrzeń',
'przeszczep', 'przeszłość', 'przyjaciel', 'przyjechał', 'przyrostek', 'przystawki', 'roszczenie', 'rozgwiazda',
'rozlewania', 'rozwinięte', 'rzeczownik', 'rękawiczka', 'samogłoskę', 'silniejszy', 'skanowanie', 'spojrzenie',
'spuchnięta', 'spółgłoska', 'strumykiem', 'stworzenie', 'substancje', 'szczęśliwy', 'szlachetny', 'uzgodniony',
'uśmiechnął', 'wszystkich', 'wypełnione', 'wypróbować', 'zadowolony', 'zamrożenie', 'zamówienie', 'zawieszona',
'świerszcza']
]
| gpl-3.0 |
santoshsahoo/namebench | nb_third_party/dns/rdtypes/ANY/LOC.py | 248 | 12571 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.rdata
_pows = (1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L,
100000000L, 1000000000L, 10000000000L)
def _exponent_of(what, desc):
exp = None
for i in xrange(len(_pows)):
if what // _pows[i] == 0L:
exp = i - 1
break
if exp is None or exp < 0:
raise dns.exception.SyntaxError("%s value out of bounds" % desc)
return exp
def _float_to_tuple(what):
if what < 0:
sign = -1
what *= -1
else:
sign = 1
what = long(round(what * 3600000))
degrees = int(what // 3600000)
what -= degrees * 3600000
minutes = int(what // 60000)
what -= minutes * 60000
seconds = int(what // 1000)
what -= int(seconds * 1000)
what = int(what)
return (degrees * sign, minutes, seconds, what)
def _tuple_to_float(what):
if what[0] < 0:
sign = -1
value = float(what[0]) * -1
else:
sign = 1
value = float(what[0])
value += float(what[1]) / 60.0
value += float(what[2]) / 3600.0
value += float(what[3]) / 3600000.0
return sign * value
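# Worked example (added for clarity; not in the original source): 42.5 degrees
# converts to (42, 30, 0, 0) and -71.1 degrees to (-71, 6, 0, 0); feeding those
# tuples back through _tuple_to_float recovers 42.5 and -71.1 respectively.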
def _encode_size(what, desc):
    what = long(what)
exponent = _exponent_of(what, desc) & 0xF
base = what // pow(10, exponent) & 0xF
return base * 16 + exponent
def _decode_size(what, desc):
exponent = what & 0x0F
if exponent > 9:
raise dns.exception.SyntaxError("bad %s exponent" % desc)
base = (what & 0xF0) >> 4
if base > 9:
raise dns.exception.SyntaxError("bad %s base" % desc)
return long(base) * pow(10, exponent)
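# Quick sanity check (added; not from the original source): a size of 100 cm is
# stored as base 1 and exponent 2, so _encode_size(100, "size") == 0x12 and
# _decode_size(0x12, "size") returns 100L again.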
class LOC(dns.rdata.Rdata):
"""LOC record
@ivar latitude: latitude
@type latitude: (int, int, int, int) tuple specifying the degrees, minutes,
seconds, and milliseconds of the coordinate.
@ivar longitude: longitude
@type longitude: (int, int, int, int) tuple specifying the degrees,
minutes, seconds, and milliseconds of the coordinate.
@ivar altitude: altitude
@type altitude: float
@ivar size: size of the sphere
@type size: float
@ivar horizontal_precision: horizontal precision
@type horizontal_precision: float
@ivar vertical_precision: vertical precision
@type vertical_precision: float
@see: RFC 1876"""
__slots__ = ['latitude', 'longitude', 'altitude', 'size',
'horizontal_precision', 'vertical_precision']
def __init__(self, rdclass, rdtype, latitude, longitude, altitude,
size=1.0, hprec=10000.0, vprec=10.0):
"""Initialize a LOC record instance.
The parameters I{latitude} and I{longitude} may be either a 4-tuple
of integers specifying (degrees, minutes, seconds, milliseconds),
or they may be floating point values specifying the number of
degrees. The other parameters are floats."""
super(LOC, self).__init__(rdclass, rdtype)
if isinstance(latitude, int) or isinstance(latitude, long):
latitude = float(latitude)
if isinstance(latitude, float):
latitude = _float_to_tuple(latitude)
self.latitude = latitude
if isinstance(longitude, int) or isinstance(longitude, long):
longitude = float(longitude)
if isinstance(longitude, float):
longitude = _float_to_tuple(longitude)
self.longitude = longitude
self.altitude = float(altitude)
self.size = float(size)
self.horizontal_precision = float(hprec)
self.vertical_precision = float(vprec)
def to_text(self, origin=None, relativize=True, **kw):
if self.latitude[0] > 0:
lat_hemisphere = 'N'
lat_degrees = self.latitude[0]
else:
lat_hemisphere = 'S'
lat_degrees = -1 * self.latitude[0]
if self.longitude[0] > 0:
long_hemisphere = 'E'
long_degrees = self.longitude[0]
else:
long_hemisphere = 'W'
long_degrees = -1 * self.longitude[0]
text = "%d %d %d.%03d %s %d %d %d.%03d %s %0.2fm" % (
lat_degrees, self.latitude[1], self.latitude[2], self.latitude[3],
lat_hemisphere, long_degrees, self.longitude[1], self.longitude[2],
self.longitude[3], long_hemisphere, self.altitude / 100.0
)
if self.size != 1.0 or self.horizontal_precision != 10000.0 or \
self.vertical_precision != 10.0:
text += " %0.2fm %0.2fm %0.2fm" % (
self.size / 100.0, self.horizontal_precision / 100.0,
self.vertical_precision / 100.0
)
return text
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
latitude = [0, 0, 0, 0]
longitude = [0, 0, 0, 0]
size = 1.0
hprec = 10000.0
vprec = 10.0
latitude[0] = tok.get_int()
t = tok.get_string()
if t.isdigit():
latitude[1] = int(t)
t = tok.get_string()
if '.' in t:
(seconds, milliseconds) = t.split('.')
if not seconds.isdigit():
raise dns.exception.SyntaxError('bad latitude seconds value')
latitude[2] = int(seconds)
if latitude[2] >= 60:
raise dns.exception.SyntaxError('latitude seconds >= 60')
l = len(milliseconds)
if l == 0 or l > 3 or not milliseconds.isdigit():
raise dns.exception.SyntaxError('bad latitude milliseconds value')
if l == 1:
m = 100
elif l == 2:
m = 10
else:
m = 1
latitude[3] = m * int(milliseconds)
t = tok.get_string()
elif t.isdigit():
latitude[2] = int(t)
t = tok.get_string()
if t == 'S':
latitude[0] *= -1
elif t != 'N':
raise dns.exception.SyntaxError('bad latitude hemisphere value')
longitude[0] = tok.get_int()
t = tok.get_string()
if t.isdigit():
longitude[1] = int(t)
t = tok.get_string()
if '.' in t:
(seconds, milliseconds) = t.split('.')
if not seconds.isdigit():
raise dns.exception.SyntaxError('bad longitude seconds value')
longitude[2] = int(seconds)
if longitude[2] >= 60:
raise dns.exception.SyntaxError('longitude seconds >= 60')
l = len(milliseconds)
if l == 0 or l > 3 or not milliseconds.isdigit():
raise dns.exception.SyntaxError('bad longitude milliseconds value')
if l == 1:
m = 100
elif l == 2:
m = 10
else:
m = 1
longitude[3] = m * int(milliseconds)
t = tok.get_string()
elif t.isdigit():
longitude[2] = int(t)
t = tok.get_string()
if t == 'W':
longitude[0] *= -1
elif t != 'E':
raise dns.exception.SyntaxError('bad longitude hemisphere value')
t = tok.get_string()
if t[-1] == 'm':
t = t[0 : -1]
altitude = float(t) * 100.0 # m -> cm
token = tok.get().unescape()
if not token.is_eol_or_eof():
value = token.value
if value[-1] == 'm':
value = value[0 : -1]
size = float(value) * 100.0 # m -> cm
token = tok.get().unescape()
if not token.is_eol_or_eof():
value = token.value
if value[-1] == 'm':
value = value[0 : -1]
hprec = float(value) * 100.0 # m -> cm
token = tok.get().unescape()
if not token.is_eol_or_eof():
value = token.value
if value[-1] == 'm':
value = value[0 : -1]
vprec = float(value) * 100.0 # m -> cm
tok.get_eol()
return cls(rdclass, rdtype, latitude, longitude, altitude,
size, hprec, vprec)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
if self.latitude[0] < 0:
sign = -1
degrees = long(-1 * self.latitude[0])
else:
sign = 1
degrees = long(self.latitude[0])
milliseconds = (degrees * 3600000 +
self.latitude[1] * 60000 +
self.latitude[2] * 1000 +
self.latitude[3]) * sign
latitude = 0x80000000L + milliseconds
if self.longitude[0] < 0:
sign = -1
degrees = long(-1 * self.longitude[0])
else:
sign = 1
degrees = long(self.longitude[0])
milliseconds = (degrees * 3600000 +
self.longitude[1] * 60000 +
self.longitude[2] * 1000 +
self.longitude[3]) * sign
longitude = 0x80000000L + milliseconds
altitude = long(self.altitude) + 10000000L
size = _encode_size(self.size, "size")
hprec = _encode_size(self.horizontal_precision, "horizontal precision")
vprec = _encode_size(self.vertical_precision, "vertical precision")
wire = struct.pack("!BBBBIII", 0, size, hprec, vprec, latitude,
longitude, altitude)
file.write(wire)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(version, size, hprec, vprec, latitude, longitude, altitude) = \
struct.unpack("!BBBBIII", wire[current : current + rdlen])
if latitude > 0x80000000L:
latitude = float(latitude - 0x80000000L) / 3600000
else:
latitude = -1 * float(0x80000000L - latitude) / 3600000
if latitude < -90.0 or latitude > 90.0:
raise dns.exception.FormError("bad latitude")
if longitude > 0x80000000L:
longitude = float(longitude - 0x80000000L) / 3600000
else:
longitude = -1 * float(0x80000000L - longitude) / 3600000
if longitude < -180.0 or longitude > 180.0:
raise dns.exception.FormError("bad longitude")
altitude = float(altitude) - 10000000.0
size = _decode_size(size, "size")
hprec = _decode_size(hprec, "horizontal precision")
vprec = _decode_size(vprec, "vertical precision")
return cls(rdclass, rdtype, latitude, longitude, altitude,
size, hprec, vprec)
from_wire = classmethod(from_wire)
def _cmp(self, other):
f = cStringIO.StringIO()
self.to_wire(f)
wire1 = f.getvalue()
f.seek(0)
f.truncate()
other.to_wire(f)
wire2 = f.getvalue()
f.close()
return cmp(wire1, wire2)
def _get_float_latitude(self):
return _tuple_to_float(self.latitude)
def _set_float_latitude(self, value):
self.latitude = _float_to_tuple(value)
float_latitude = property(_get_float_latitude, _set_float_latitude,
doc="latitude as a floating point value")
def _get_float_longitude(self):
return _tuple_to_float(self.longitude)
def _set_float_longitude(self, value):
self.longitude = _float_to_tuple(value)
float_longitude = property(_get_float_longitude, _set_float_longitude,
doc="longitude as a floating point value")
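# Hedged usage sketch (added; not part of the original module): constructing a LOC
# rdata directly and rendering its presentation form. The rdclass/rdtype constants
# are assumed to come from dns.rdataclass and dns.rdatatype.
#
#     import dns.rdataclass, dns.rdatatype
#     loc = LOC(dns.rdataclass.IN, dns.rdatatype.LOC, 42.5, -71.1, 1000.0)
#     print loc.to_text()   # 42 30 0.000 N 71 6 0.000 W 10.00m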
| apache-2.0 |
boompieman/iim_project | project_python2/lib/python2.7/site-packages/pygments/lexers/dalvik.py | 47 | 4420 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dalvik
~~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Dalvik VM-related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Keyword, Text, Comment, Name, String, Number, \
Punctuation
__all__ = ['SmaliLexer']
class SmaliLexer(RegexLexer):
"""
For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly
code.
.. versionadded:: 1.6
"""
name = 'Smali'
aliases = ['smali']
filenames = ['*.smali']
mimetypes = ['text/smali']
tokens = {
'root': [
include('comment'),
include('label'),
include('field'),
include('method'),
include('class'),
include('directive'),
include('access-modifier'),
include('instruction'),
include('literal'),
include('punctuation'),
include('type'),
include('whitespace')
],
'directive': [
(r'^[ \t]*\.(class|super|implements|field|subannotation|annotation|'
r'enum|method|registers|locals|array-data|packed-switch|'
r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
r'epilogue|source)', Keyword),
(r'^[ \t]*\.end (field|subannotation|annotation|method|array-data|'
'packed-switch|sparse-switch|parameter|local)', Keyword),
(r'^[ \t]*\.restart local', Keyword),
],
'access-modifier': [
(r'(public|private|protected|static|final|synchronized|bridge|'
r'varargs|native|abstract|strictfp|synthetic|constructor|'
r'declared-synchronized|interface|enum|annotation|volatile|'
r'transient)', Keyword),
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
],
'instruction': [
(r'\b[vp]\d+\b', Name.Builtin), # registers
(r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions
],
'literal': [
(r'".*"', String),
(r'0x[0-9A-Fa-f]+t?', Number.Hex),
(r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+L?', Number.Integer),
],
'field': [
(r'(\$?\b)([\w$]*)(:)',
bygroups(Punctuation, Name.Variable, Punctuation)),
],
'method': [
(r'<(?:cl)?init>', Name.Function), # constructor
(r'(\$?\b)([\w$]*)(\()',
bygroups(Punctuation, Name.Function, Punctuation)),
],
'label': [
(r':\w+', Name.Label),
],
'class': [
# class names in the form Lcom/namespace/ClassName;
# I only want to color the ClassName part, so the namespace part is
# treated as 'Text'
(r'(L)((?:[\w$]+/)*)([\w$]+)(;)',
bygroups(Keyword.Type, Text, Name.Class, Text)),
],
'punctuation': [
(r'->', Punctuation),
(r'[{},():=.-]', Punctuation),
],
'type': [
(r'[ZBSCIJFDV\[]+', Keyword.Type),
],
'comment': [
(r'#.*?\n', Comment),
],
}
def analyse_text(text):
score = 0
if re.search(r'^\s*\.class\s', text, re.MULTILINE):
score += 0.5
if re.search(r'\b((check-cast|instance-of|throw-verification-error'
r')\b|(-to|add|[ais]get|[ais]put|and|cmpl|const|div|'
r'if|invoke|move|mul|neg|not|or|rem|return|rsub|shl|'
r'shr|sub|ushr)[-/])|{|}', text, re.MULTILINE):
score += 0.3
if re.search(r'(\.(catchall|epilogue|restart local|prologue)|'
r'\b(array-data|class-change-error|declared-synchronized|'
r'(field|inline|vtable)@0x[0-9a-fA-F]|generic-error|'
r'illegal-class-access|illegal-field-access|'
r'illegal-method-access|instantiation-error|no-error|'
r'no-such-class|no-such-field|no-such-method|'
r'packed-switch|sparse-switch))\b', text, re.MULTILINE):
score += 0.6
return score
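# A short usage sketch (added; pygments.highlight and HtmlFormatter are standard
# Pygments entry points, while the smali snippet itself is made up for illustration).
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    sample = '.class public Lcom/example/Foo;\n.super Ljava/lang/Object;\n'
    print(highlight(sample, SmaliLexer(), HtmlFormatter()))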
| gpl-3.0 |
umlfri/umlfri2 | umlfri2/application/commands/solution/applymetamodelconfigpatch.py | 1 | 1069 | from umlfri2.application.events.solution import MetamodelConfigChangedEvent
from ..base import Command, CommandNotDone
class ApplyMetamodelConfigPatchCommand(Command):
def __init__(self, solution, project, patch):
self.__project = project
self.__solution = solution
self.__patch = patch
@property
def description(self):
return "Changed config of the '{0}' project metamodel".format(self.__project.name)
def _do(self, ruler):
if not self.__patch.has_changes:
raise CommandNotDone
self.__project.apply_config_patch(self.__patch)
self.__solution.invalidate_all_caches()
def _redo(self, ruler):
self.__project.apply_config_patch(self.__patch)
self.__solution.invalidate_all_caches()
def _undo(self, ruler):
self.__project.apply_config_patch(self.__patch.make_reverse())
self.__solution.invalidate_all_caches()
def get_updates(self):
yield MetamodelConfigChangedEvent(self.__project, self.__patch)
| gpl-3.0 |
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/ovirt/ovirt_storage_templates_facts.py | 69 | 4244 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_storage_templates_facts
short_description: Retrieve facts about one or more oVirt/RHV templates related to a storage domain.
author: "Maor Lipchuk"
version_added: "2.4"
description:
    - "Retrieve facts about one or more oVirt/RHV templates related to a storage domain."
notes:
- "This module creates a new top-level C(ovirt_storage_templates) fact, which
contains a list of templates."
options:
unregistered:
description:
- "Flag which indicates whether to get unregistered templates which contain one or more
disks which reside on a storage domain or diskless templates."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all Templates which relate to a storage domain and
# are unregistered:
- ovirt_storage_templates_facts:
    unregistered: True
- debug:
var: ovirt_storage_templates
'''
RETURN = '''
ovirt_storage_templates:
    description: "List of dictionaries describing the Templates. Template attributes are mapped to dictionary keys,
all Templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
get_id_by_name
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
all_content=dict(default=False, type='bool'),
case_sensitive=dict(default=True, type='bool'),
storage_domain=dict(default=None),
max=dict(default=None, type='int'),
unregistered=dict(default=False, type='bool'),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
storage_domains_service = connection.system_service().storage_domains_service()
sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
templates_service = storage_domain_service.templates_service()
        # List the Templates, fetching only the unregistered ones when requested:
if module.params.get('unregistered'):
templates = templates_service.list(unregistered=True)
else:
templates = templates_service.list()
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_storage_templates=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in templates
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| bsd-3-clause |
utamaro/youtube-dl | youtube_dl/extractor/muzu.py | 147 | 2224 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
)
class MuzuTVIE(InfoExtractor):
_VALID_URL = r'https?://www\.muzu\.tv/(.+?)/(.+?)/(?P<id>\d+)'
IE_NAME = 'muzu.tv'
_TEST = {
'url': 'http://www.muzu.tv/defected/marcashken-featuring-sos-cat-walk-original-mix-music-video/1981454/',
'md5': '98f8b2c7bc50578d6a0364fff2bfb000',
'info_dict': {
'id': '1981454',
'ext': 'mp4',
'title': 'Cat Walk (Original Mix)',
'description': 'md5:90e868994de201b2570e4e5854e19420',
'uploader': 'MarcAshken featuring SOS',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
info_data = compat_urllib_parse.urlencode({
'format': 'json',
'url': url,
})
info = self._download_json(
'http://www.muzu.tv/api/oembed/?%s' % info_data,
video_id, 'Downloading video info')
player_info = self._download_json(
'http://player.muzu.tv/player/playerInit?ai=%s' % video_id,
video_id, 'Downloading player info')
video_info = player_info['videos'][0]
for quality in ['1080', '720', '480', '360']:
if video_info.get('v%s' % quality):
break
data = compat_urllib_parse.urlencode({
'ai': video_id,
# Even if each time you watch a video the hash changes,
# it seems to work for different videos, and it will work
# even if you use any non empty string as a hash
'viewhash': 'VBNff6djeV4HV5TRPW5kOHub2k',
'device': 'web',
'qv': quality,
})
video_url_info = self._download_json(
'http://player.muzu.tv/player/requestVideo?%s' % data,
video_id, 'Downloading video url')
video_url = video_url_info['url']
return {
'id': video_id,
'title': info['title'],
'url': video_url,
'thumbnail': info['thumbnail_url'],
'description': info['description'],
'uploader': info['author_name'],
}
| unlicense |
minhphung171093/GreenERP_V8 | openerp/addons/im_chat/im_chat.py | 268 | 21928 | # -*- coding: utf-8 -*-
import base64
import datetime
import logging
import time
import uuid
import random
import simplejson
import openerp
from openerp.http import request
from openerp.osv import osv, fields
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.addons.bus.bus import TIMEOUT
_logger = logging.getLogger(__name__)
DISCONNECTION_TIMER = TIMEOUT + 5
AWAY_TIMER = 600 # 10 minutes
#----------------------------------------------------------
# Models
#----------------------------------------------------------
class im_chat_conversation_state(osv.Model):
""" Adds a state on the m2m between user and session. """
_name = 'im_chat.conversation_state'
_table = "im_chat_session_res_users_rel"
_columns = {
"state" : fields.selection([('open', 'Open'), ('folded', 'Folded'), ('closed', 'Closed')]),
"session_id" : fields.many2one('im_chat.session', 'Session', required=True, ondelete="cascade"),
"user_id" : fields.many2one('res.users', 'Users', required=True, ondelete="cascade"),
}
_defaults = {
"state" : 'open'
}
class im_chat_session(osv.Model):
""" Conversations."""
_order = 'id desc'
_name = 'im_chat.session'
_rec_name = 'uuid'
_columns = {
'uuid': fields.char('UUID', size=50, select=True),
'message_ids': fields.one2many('im_chat.message', 'to_id', 'Messages'),
'user_ids': fields.many2many('res.users', 'im_chat_session_res_users_rel', 'session_id', 'user_id', "Session Users"),
'session_res_users_rel': fields.one2many('im_chat.conversation_state', 'session_id', 'Relation Session Users'),
}
_defaults = {
'uuid': lambda *args: '%s' % uuid.uuid4(),
}
def is_in_session(self, cr, uid, uuid, user_id, context=None):
""" return if the given user_id is in the session """
sids = self.search(cr, uid, [('uuid', '=', uuid)], context=context, limit=1)
for session in self.browse(cr, uid, sids, context=context):
return user_id and user_id in [u.id for u in session.user_ids]
return False
    def users_infos(self, cr, uid, ids, context=None):
        """ get the user infos for all the users in the session """
for session in self.pool["im_chat.session"].browse(cr, uid, ids, context=context):
users_infos = self.pool["res.users"].read(cr, uid, [u.id for u in session.user_ids], ['id','name', 'im_status'], context=context)
return users_infos
    def is_private(self, cr, uid, ids, context=None):
        """ return True if the session is private between its users, i.e. it contains no external (anonymous) messages """
        for session_id in ids:
            mess_ids = self.pool["im_chat.message"].search(cr, uid, [('to_id','=',session_id),('from_id','=',None)], context=context)
            return len(mess_ids) == 0
def session_info(self, cr, uid, ids, context=None):
""" get the session info/header of a given session """
for session in self.browse(cr, uid, ids, context=context):
info = {
'uuid': session.uuid,
'users': session.users_infos(),
'state': 'open',
}
# add uid_state if available
if uid:
domain = [('user_id','=',uid), ('session_id','=',session.id)]
uid_state = self.pool['im_chat.conversation_state'].search_read(cr, uid, domain, ['state'], context=context)
if uid_state:
info['state'] = uid_state[0]['state']
return info
def session_get(self, cr, uid, user_to, context=None):
""" returns the canonical session between 2 users, create it if needed """
session_id = False
if user_to:
sids = self.search(cr, uid, [('user_ids','in', user_to),('user_ids', 'in', [uid])], context=context, limit=1)
for sess in self.browse(cr, uid, sids, context=context):
if len(sess.user_ids) == 2 and sess.is_private():
session_id = sess.id
break
else:
session_id = self.create(cr, uid, { 'user_ids': [(6,0, (user_to, uid))] }, context=context)
return self.session_info(cr, uid, [session_id], context=context)
def update_state(self, cr, uid, uuid, state=None, context=None):
""" modify the fold_state of the given session, and broadcast to himself (e.i. : to sync multiple tabs) """
domain = [('user_id','=',uid), ('session_id.uuid','=',uuid)]
ids = self.pool['im_chat.conversation_state'].search(cr, uid, domain, context=context)
for sr in self.pool['im_chat.conversation_state'].browse(cr, uid, ids, context=context):
if not state:
state = sr.state
if sr.state == 'open':
state = 'folded'
else:
state = 'open'
self.pool['im_chat.conversation_state'].write(cr, uid, ids, {'state': state}, context=context)
self.pool['bus.bus'].sendone(cr, uid, (cr.dbname, 'im_chat.session', uid), sr.session_id.session_info())
def add_user(self, cr, uid, uuid, user_id, context=None):
""" add the given user to the given session """
sids = self.search(cr, uid, [('uuid', '=', uuid)], context=context, limit=1)
for session in self.browse(cr, uid, sids, context=context):
if user_id not in [u.id for u in session.user_ids]:
self.write(cr, uid, [session.id], {'user_ids': [(4, user_id)]}, context=context)
                # notify all the channel users and the anonymous channel
notifications = []
for channel_user_id in session.user_ids:
info = self.session_info(cr, channel_user_id.id, [session.id], context=context)
notifications.append([(cr.dbname, 'im_chat.session', channel_user_id.id), info])
                # Anonymous users are not notified when a new user is added: cannot exec session_info as uid = None
info = self.session_info(cr, openerp.SUPERUSER_ID, [session.id], context=context)
notifications.append([session.uuid, info])
self.pool['bus.bus'].sendmany(cr, uid, notifications)
# send a message to the conversation
user = self.pool['res.users'].read(cr, uid, user_id, ['name'], context=context)
self.pool["im_chat.message"].post(cr, uid, uid, session.uuid, "meta", user['name'] + " joined the conversation.", context=context)
def get_image(self, cr, uid, uuid, user_id, context=None):
""" get the avatar of a user in the given session """
#default image
image_b64 = 'R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
# get the session
if user_id:
session_id = self.pool["im_chat.session"].search(cr, uid, [('uuid','=',uuid), ('user_ids','in', user_id)])
if session_id:
# get the image of the user
res = self.pool["res.users"].read(cr, uid, [user_id], ["image_small"])[0]
if res["image_small"]:
image_b64 = res["image_small"]
return image_b64
class im_chat_message(osv.Model):
    """ Session messages. The message type can be 'message' or 'meta'.
        For anonymous messages, the from_id is False.
Messages are sent to a session not to users.
"""
_name = 'im_chat.message'
_order = "id desc"
_columns = {
'create_date': fields.datetime('Create Date', required=True, select=True),
'from_id': fields.many2one('res.users', 'Author'),
'to_id': fields.many2one('im_chat.session', 'Session To', required=True, select=True, ondelete='cascade'),
'type': fields.selection([('message','Message'), ('meta','Meta')], 'Type'),
'message': fields.char('Message'),
}
_defaults = {
'type' : 'message',
}
def init_messages(self, cr, uid, context=None):
""" get unread messages and old messages received less than AWAY_TIMER
ago and the session_info for open or folded window
"""
# get the message since the AWAY_TIMER
threshold = datetime.datetime.now() - datetime.timedelta(seconds=AWAY_TIMER)
threshold = threshold.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
domain = [('to_id.user_ids', 'in', [uid]), ('create_date','>',threshold)]
# get the message since the last poll of the user
presence_ids = self.pool['im_chat.presence'].search(cr, uid, [('user_id', '=', uid)], context=context)
if presence_ids:
presence = self.pool['im_chat.presence'].browse(cr, uid, presence_ids, context=context)[0]
threshold = presence.last_poll
domain.append(('create_date','>',threshold))
messages = self.search_read(cr, uid, domain, ['from_id','to_id','create_date','type','message'], order='id asc', context=context)
# get the session of the messages and the not-closed ones
session_ids = map(lambda m: m['to_id'][0], messages)
domain = [('user_id','=',uid), '|', ('state','!=','closed'), ('session_id', 'in', session_ids)]
session_rels_ids = self.pool['im_chat.conversation_state'].search(cr, uid, domain, context=context)
        # re-open the sessions where a message has been received recently
session_rels = self.pool['im_chat.conversation_state'].browse(cr, uid, session_rels_ids, context=context)
reopening_session = []
notifications = []
for sr in session_rels:
si = sr.session_id.session_info()
si['state'] = sr.state
if sr.state == 'closed':
si['state'] = 'folded'
reopening_session.append(sr.id)
notifications.append([(cr.dbname,'im_chat.session', uid), si])
for m in messages:
notifications.append([(cr.dbname,'im_chat.session', uid), m])
self.pool['im_chat.conversation_state'].write(cr, uid, reopening_session, {'state': 'folded'}, context=context)
return notifications
def post(self, cr, uid, from_uid, uuid, message_type, message_content, context=None):
""" post and broadcast a message, return the message id """
message_id = False
Session = self.pool['im_chat.session']
session_ids = Session.search(cr, uid, [('uuid','=',uuid)], context=context)
notifications = []
for session in Session.browse(cr, uid, session_ids, context=context):
# build the new message
vals = {
"from_id": from_uid,
"to_id": session.id,
"type": message_type,
"message": message_content,
}
# save it
message_id = self.create(cr, uid, vals, context=context)
# broadcast it to channel (anonymous users) and users_ids
data = self.read(cr, uid, [message_id], ['from_id','to_id','create_date','type','message'], context=context)[0]
notifications.append([uuid, data])
for user in session.user_ids:
notifications.append([(cr.dbname, 'im_chat.session', user.id), data])
self.pool['bus.bus'].sendmany(cr, uid, notifications)
return message_id
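    # Hedged usage sketch (added; not part of the original module): posting a chat
    # message into an existing session from server-side code, assuming 'session' is
    # an im_chat.session browse record available in the calling context:
    #
    #     self.pool['im_chat.message'].post(cr, uid, uid, session.uuid,
    #                                       'message', 'Hello!', context=context)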
def get_messages(self, cr, uid, uuid, last_id=False, limit=20, context=None):
""" get messages (id desc) from given last_id in the given session """
Session = self.pool['im_chat.session']
if Session.is_in_session(cr, uid, uuid, uid, context=context):
domain = [("to_id.uuid", "=", uuid)]
if last_id:
domain.append(("id", "<", last_id));
return self.search_read(cr, uid, domain, ['id', 'create_date','to_id','from_id', 'type', 'message'], limit=limit, context=context)
return False
class im_chat_presence(osv.Model):
""" im_chat_presence status can be: online, away or offline.
        This model is a one2one, but is not attached to res_users to avoid database concurrency errors
"""
_name = 'im_chat.presence'
_columns = {
'user_id' : fields.many2one('res.users', 'Users', required=True, select=True, ondelete="cascade"),
'last_poll': fields.datetime('Last Poll'),
'last_presence': fields.datetime('Last Presence'),
'status' : fields.selection([('online','Online'), ('away','Away'), ('offline','Offline')], 'IM Status'),
}
_defaults = {
'last_poll' : fields.datetime.now,
'last_presence' : fields.datetime.now,
'status' : 'offline'
}
_sql_constraints = [('im_chat_user_status_unique','unique(user_id)', 'A user can only have one IM status.')]
def update(self, cr, uid, presence=True, context=None):
""" register the poll, and change its im status if necessary. It also notify the Bus if the status has changed. """
presence_ids = self.search(cr, uid, [('user_id', '=', uid)], context=context)
presences = self.browse(cr, uid, presence_ids, context=context)
# set the default values
send_notification = True
vals = {
'last_poll': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'status' : presences and presences[0].status or 'offline'
}
# update the user presence or create a new one
if not presences:
vals['status'] = 'online'
vals['user_id'] = uid
self.create(cr, uid, vals, context=context)
else:
if presence:
vals['last_presence'] = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
vals['status'] = 'online'
else:
threshold = datetime.datetime.now() - datetime.timedelta(seconds=AWAY_TIMER)
if datetime.datetime.strptime(presences[0].last_presence, DEFAULT_SERVER_DATETIME_FORMAT) < threshold:
vals['status'] = 'away'
send_notification = presences[0].status != vals['status']
# write only if the last_poll is older than TIMEOUT, or if the status has changed
delta = datetime.datetime.now() - datetime.datetime.strptime(presences[0].last_poll, DEFAULT_SERVER_DATETIME_FORMAT)
if (delta > datetime.timedelta(seconds=TIMEOUT) or send_notification):
self.write(cr, uid, presence_ids, vals, context=context)
# avoid TransactionRollbackError
cr.commit()
# notify if the status has changed
if send_notification:
self.pool['bus.bus'].sendone(cr, uid, (cr.dbname,'im_chat.presence'), {'id': uid, 'im_status': vals['status']})
# gc: disconnect the users whose last_poll is too old. 1 in 100 chance to do it.
if random.random() < 0.01:
self.check_users_disconnection(cr, uid, context=context)
return True
def check_users_disconnection(self, cr, uid, context=None):
""" disconnect the users having a too old last_poll """
dt = (datetime.datetime.now() - datetime.timedelta(0, DISCONNECTION_TIMER)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
presence_ids = self.search(cr, uid, [('last_poll', '<', dt), ('status' , '!=', 'offline')], context=context)
self.write(cr, uid, presence_ids, {'status': 'offline'}, context=context)
presences = self.browse(cr, uid, presence_ids, context=context)
notifications = []
for presence in presences:
notifications.append([(cr.dbname,'im_chat.presence'), {'id': presence.user_id.id, 'im_status': presence.status}])
self.pool['bus.bus'].sendmany(cr, uid, notifications)
return True
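# --- Illustrative sketch (added for clarity, not part of the original addon) ---
# The status transitions above hinge on three module-level timers: a user is
# marked 'away' once last_presence is older than AWAY_TIMER seconds, records are
# rewritten at most every TIMEOUT seconds, and check_users_disconnection() flips
# users to 'offline' once last_poll is older than DISCONNECTION_TIMER. The
# numeric defaults below are placeholders, not the addon's real values.
def _example_presence_thresholds(AWAY_TIMER=600, DISCONNECTION_TIMER=120):
    import datetime
    now = datetime.datetime.now()
    away_limit = now - datetime.timedelta(seconds=AWAY_TIMER)
    offline_limit = now - datetime.timedelta(seconds=DISCONNECTION_TIMER)
    return away_limit, offline_limit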
class res_users(osv.Model):
_inherit = "res.users"
def _get_im_status(self, cr, uid, ids, fields, arg, context=None):
""" function computing the im_status field of the users """
r = dict((i, 'offline') for i in ids)
status_ids = self.pool['im_chat.presence'].search(cr, uid, [('user_id', 'in', ids)], context=context)
status = self.pool['im_chat.presence'].browse(cr, uid, status_ids, context=context)
for s in status:
r[s.user_id.id] = s.status
return r
_columns = {
'im_status' : fields.function(_get_im_status, type="char", string="IM Status"),
}
def im_search(self, cr, uid, name, limit=20, context=None):
""" search users with a name and return its id, name and im_status """
result = []
# find the employee group
group_employee = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'group_user')[1]
where_clause_base = " U.active = 't' "
query_params = ()
if name:
where_clause_base += " AND P.name ILIKE %s "
query_params = query_params + ('%'+name+'%',)
# first query to find online employees
cr.execute('''SELECT U.id as id, P.name as name, COALESCE(S.status, 'offline') as im_status
FROM im_chat_presence S
JOIN res_users U ON S.user_id = U.id
JOIN res_partner P ON P.id = U.partner_id
WHERE '''+where_clause_base+'''
AND U.id != %s
AND EXISTS (SELECT 1 FROM res_groups_users_rel G WHERE G.gid = %s AND G.uid = U.id)
AND S.status = 'online'
ORDER BY P.name
LIMIT %s
''', query_params + (uid, group_employee, limit))
result = result + cr.dictfetchall()
# second query to find other online people
if(len(result) < limit):
cr.execute('''SELECT U.id as id, P.name as name, COALESCE(S.status, 'offline') as im_status
FROM im_chat_presence S
JOIN res_users U ON S.user_id = U.id
JOIN res_partner P ON P.id = U.partner_id
WHERE '''+where_clause_base+'''
AND U.id NOT IN %s
AND S.status = 'online'
ORDER BY P.name
LIMIT %s
''', query_params + (tuple([u["id"] for u in result]) + (uid,), limit-len(result)))
result = result + cr.dictfetchall()
# third query to find all other people
if(len(result) < limit):
cr.execute('''SELECT U.id as id, P.name as name, COALESCE(S.status, 'offline') as im_status
FROM res_users U
LEFT JOIN im_chat_presence S ON S.user_id = U.id
LEFT JOIN res_partner P ON P.id = U.partner_id
WHERE '''+where_clause_base+'''
AND U.id NOT IN %s
ORDER BY P.name
LIMIT %s
''', query_params + (tuple([u["id"] for u in result]) + (uid,), limit-len(result)))
result = result + cr.dictfetchall()
return result
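# --- Illustrative sketch (added for clarity, not part of the original addon) ---
# im_search() above is an instance of a simple "fill up to limit" strategy: run
# progressively broader queries (online employees, then other online users, then
# everyone) and stop as soon as enough rows are collected. The generic shape of
# that strategy, with hypothetical query callables, looks like this:
def _example_tiered_search(queries, limit=20):
    result = []
    for run_query in queries:  # most specific query first
        if len(result) >= limit:
            break
        result += run_query(limit - len(result))
    return result[:limit]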
#----------------------------------------------------------
# Controllers
#----------------------------------------------------------
class Controller(openerp.addons.bus.bus.Controller):
def _poll(self, dbname, channels, last, options):
if request.session.uid:
registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
registry.get('im_chat.presence').update(cr, uid, options.get('im_presence', False), context=context)
## For performance reasons, the real time status notification is disabled. This means a change of status is still broadcast
## but not received by anyone. Otherwise, all listening users restart their longpolling at the same time and cause a 'ConnectionPool Full Error'
## since there are not enough cursors for everyone. Now, when a user opens his list of users, an RPC call is made to update his user status list.
##channels.append((request.db,'im_chat.presence'))
# channel to receive message
channels.append((request.db,'im_chat.session', request.uid))
return super(Controller, self)._poll(dbname, channels, last, options)
@openerp.http.route('/im_chat/init', type="json", auth="none")
def init(self):
registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
notifications = registry['im_chat.message'].init_messages(cr, uid, context=context)
return notifications
@openerp.http.route('/im_chat/post', type="json", auth="none")
def post(self, uuid, message_type, message_content):
registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
# execute the post method as SUPERUSER_ID
message_id = registry["im_chat.message"].post(cr, openerp.SUPERUSER_ID, uid, uuid, message_type, message_content, context=context)
return message_id
@openerp.http.route(['/im_chat/image/<string:uuid>/<string:user_id>'], type='http', auth="none")
def image(self, uuid, user_id):
registry, cr, context, uid = request.registry, request.cr, request.context, request.session.uid
# get the image
Session = registry.get("im_chat.session")
image_b64 = Session.get_image(cr, openerp.SUPERUSER_ID, uuid, simplejson.loads(user_id), context)
# build the response
image_data = base64.b64decode(image_b64)
headers = [('Content-Type', 'image/png')]
headers.append(('Content-Length', len(image_data)))
return request.make_response(image_data, headers)
@openerp.http.route(['/im_chat/history'], type="json", auth="none")
def history(self, uuid, last_id=False, limit=20):
registry, cr, uid, context = request.registry, request.cr, request.session.uid or openerp.SUPERUSER_ID, request.context
return registry["im_chat.message"].get_messages(cr, uid, uuid, last_id, limit, context=context)
# vim:et:
| agpl-3.0 |
foodszhang/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
| lgpl-3.0 |
jffernandez/kivy | examples/frameworks/twisted/echo_client_app.py | 38 | 2147 | #install_twisted_rector must be called before importing the reactor
from kivy.support import install_twisted_reactor
install_twisted_reactor()
# A simple client that sends messages to the echo server
from twisted.internet import reactor, protocol
class EchoClient(protocol.Protocol):
def connectionMade(self):
self.factory.app.on_connection(self.transport)
def dataReceived(self, data):
self.factory.app.print_message(data)
class EchoFactory(protocol.ClientFactory):
protocol = EchoClient
def __init__(self, app):
self.app = app
def clientConnectionLost(self, conn, reason):
self.app.print_message("connection lost")
def clientConnectionFailed(self, conn, reason):
self.app.print_message("connection failed")
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
# A simple kivy App, with a textbox to enter messages, and
# a large label to display all the messages received from
# the server
class TwistedClientApp(App):
connection = None
def build(self):
root = self.setup_gui()
self.connect_to_server()
return root
def setup_gui(self):
self.textbox = TextInput(size_hint_y=.1, multiline=False)
self.textbox.bind(on_text_validate=self.send_message)
self.label = Label(text='connecting...\n')
self.layout = BoxLayout(orientation='vertical')
self.layout.add_widget(self.label)
self.layout.add_widget(self.textbox)
return self.layout
def connect_to_server(self):
reactor.connectTCP('localhost', 8000, EchoFactory(self))
def on_connection(self, connection):
self.print_message("connected succesfully!")
self.connection = connection
def send_message(self, *args):
msg = self.textbox.text
if msg and self.connection:
self.connection.write(str(self.textbox.text))
self.textbox.text = ""
def print_message(self, msg):
self.label.text += msg + "\n"
if __name__ == '__main__':
TwistedClientApp().run()
| mit |
amueller/scipy-2017-sklearn | notebooks/solutions/23_batchtrain.py | 4 | 1947 | import os
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.base import clone
from sklearn.datasets import load_files
def batch_train(clf, fnames, labels, iterations=1,
batchsize=1000, random_seed=1):
vec = HashingVectorizer(encoding='latin-1')
idx = np.arange(labels.shape[0])
c_clf = clone(clf)
rng = np.random.RandomState(seed=random_seed)
shuffled_idx = rng.permutation(range(len(fnames)))
fnames_ary = np.asarray(fnames)
for _ in range(iterations):
for batch in np.split(shuffled_idx, len(fnames) // batchsize):
documents = []
for fn in fnames_ary[batch]:
with open(fn, 'r') as f:
documents.append(f.read())
X_batch = vec.transform(documents)
batch_labels = labels[batch]
c_clf.partial_fit(X=X_batch,
y=batch_labels,
classes=[0, 1])
return c_clf
# Out-of-core Training
train_path = os.path.join('datasets', 'IMDb', 'aclImdb', 'train')
train_pos = os.path.join(train_path, 'pos')
train_neg = os.path.join(train_path, 'neg')
fnames = [os.path.join(train_pos, f) for f in os.listdir(train_pos)] +\
[os.path.join(train_neg, f) for f in os.listdir(train_neg)]
y_train = np.zeros((len(fnames), ), dtype=int)
y_train[:12500] = 1
np.bincount(y_train)
sgd = SGDClassifier(loss='log', random_state=1)
sgd = batch_train(clf=sgd,
fnames=fnames,
labels=y_train)
# Testing
test_path = os.path.join('datasets', 'IMDb', 'aclImdb', 'test')
test = load_files(container_path=(test_path),
categories=['pos', 'neg'])
docs_test, y_test = test['data'][12500:], test['target'][12500:]
vec = HashingVectorizer(encoding='latin-1')
print('accuracy:', sgd.score(vec.transform(docs_test), y_test))
| cc0-1.0 |
whiplash01/Hyperloop | src/hyperloop/battery_plot.py | 8 | 8289 | import numpy as np
import pylab as p
#design variables
# hl.Mach_bypass = .95
# hl.Mach_pod_max = .90
# hl.Mach_c1_in = .65
# hl.c1_PR_des = 13
# #initial guesses
# hl.compress.W_in = .35
# hl.flow_limit.radius_tube = hl.pod.radius_tube_inner = 178
# hl.compress.Ts_tube = hl.flow_limit.Ts_tube = hl.tube_wall_temp.tubeWallTemp = 322
# hl.compress.c2_PR_des = 5
#Mach, Battery Energy, Comp Energy/s, Time
#(old)
data2 = np.array([
[0.69999999999999996, 0.70999999999999996, 0.71999999999999997, 0.72999999999999998, 0.73999999999999999, 0.75, 0.76000000000000001, 0.77000000000000002, 0.78000000000000003, 0.79000000000000004, 0.80000000000000004, 0.81000000000000005, 0.82000000000000006, 0.83000000000000007, 0.84000000000000008, 0.85000000000000009, 0.8600000000000001, 0.87000000000000011, 0.88000000000000012, 0.89000000000000012, 0.90000000000000013, 0.91000000000000014, 0.92000000000000015, 0.93000000000000016],
[425.64734534294513, 419.65424475203628, 413.82772409685742, 408.16094022060747, 402.64742075574082, 397.28103790778948, 392.05598627110015, 386.96676904786113, 382.00815317806808, 377.1751865322513, 372.46316034132201, 367.86760002503087, 363.38424705208229, 359.00904154735571, 354.73813422797747, 350.56784050845937, 346.49464897789539, 342.51522233298749, 338.62635318664013, 334.82501553606085, 331.1082699045225, 327.47334674417999, 323.91756978015695, 320.43839279530488],
[345.33770199869105, 346.5392280454048, 347.77005182839076, 349.03173094706352, 350.32484572200718, 351.6500050517148, 353.00781977374407, 354.399264528071, 355.82499428958067, 357.28562569758549, 358.78210283805197, 360.31530103162191, 361.88440156468499, 363.49217031265931, 365.13789903116964, 366.82390861203487, 368.54951824043007, 370.31660813514264, 372.13130119605955, 373.97719159650512, 375.8732069051859, 377.81270605751422, 379.79821127671244, 381.83106344615129],
[2806.4660132501872, 2766.9510642991399, 2728.5344445946644, 2691.1710344215876, 2654.8181588290604, 2619.4354147766339, 2584.9845248643965, 2551.429246469414, 2518.7350758993498, 2486.8693617511071, 2455.8010571955297, 2425.500659505698, 2395.9400904532899, 2367.0925816309168, 2338.9327531514996, 2311.4363110447871, 2284.5801031509586, 2258.3421252724447, 2232.7012298020227, 2207.6374650729285, 2183.1314499199289, 2159.164923588, 2135.7202403087272, 2112.780611837175]
])
#updated mission.py (wrong power_req)
data3 = np.array([
[0.69999999999999996, 0.70999999999999996, 0.71999999999999997, 0.72999999999999998, 0.73999999999999999, 0.75, 0.76000000000000001, 0.77000000000000002, 0.78000000000000003, 0.79000000000000004, 0.80000000000000004, 0.81000000000000005, 0.82000000000000006, 0.83000000000000007, 0.84000000000000008, 0.85000000000000009, 0.8600000000000001, 0.87000000000000011, 0.88000000000000012, 0.89000000000000012, 0.90000000000000013, 0.91000000000000014, 0.92000000000000015, 0.93000000000000016],
[382.96647240956696, 379.61471316853721, 376.35849405942434, 373.19389006178545, 370.1171888064456, 367.12487556237443, 364.21362050544553, 361.38027069256651, 358.62182475717918, 355.93544218280636, 353.31842210450969, 350.76819794308955, 348.28232707355158, 345.85848093869816, 343.4944514211744, 341.18812519513483, 338.93748822477761, 336.74062606052388, 334.59569924025851, 332.50097149906588, 330.45475728852551, 328.45546757714186, 326.50156747044355, 324.591596306165],
[345.33770199869105, 346.5392280454048, 347.77005182839076, 349.03173094706352, 350.32484572200718, 351.6500050517148, 353.00781977374407, 354.399264528071, 355.82499428958067, 357.28562569758549, 358.78210283805197, 360.31530103162191, 361.88440156468499, 363.49217031265931, 365.13789903116964, 366.82390861203487, 368.54951824043007, 370.31660813514264, 372.13130119605955, 373.97719159650512, 375.8732069051859, 377.81270605751422, 379.79821127671244, 381.83106344615129],
[2525.0536642389029, 2502.9541527595861, 2481.4845762159844, 2460.6190553524316, 2440.3331130095312, 2420.6035751365348, 2401.4084868490913, 2382.7270595114273, 2364.5395038934889, 2346.8270913152069, 2329.5720138758879, 2312.7573490753157, 2296.3669916937465, 2280.3855886068009, 2264.7985807989521, 2249.592034253636, 2234.7526696139184, 2220.2678641353223, 2206.1254894962099, 2192.3140977960388, 2178.8225755287399, 2165.6404455635725, 2152.7575877172098, 2140.1643712494397]
])
#updated mission.py (correct)
data = np.array([
[0.69999999999999996, 0.70999999999999996, 0.71999999999999997, 0.72999999999999998, 0.73999999999999999, 0.75, 0.76000000000000001, 0.77000000000000002, 0.78000000000000003, 0.79000000000000004, 0.80000000000000004, 0.81000000000000005, 0.82000000000000006, 0.83000000000000007, 0.84000000000000008, 0.85000000000000009, 0.8600000000000001, 0.87000000000000011, 0.88000000000000012, 0.89000000000000012, 0.90000000000000013, 0.91000000000000014, 0.92000000000000015, 0.93000000000000016],
[314.8875274392023, 313.21759441929203, 311.63384044071688, 310.13454625507887, 308.71725492305012, 307.37967701456944, 306.11965739649798, 304.93548130571611, 303.82525891986256, 302.78718373388273, 301.81982489066297, 300.921782938533, 300.09033692516215, 299.32583299392121, 298.62581504950424, 297.99038489545626, 297.41725714015058, 296.90630110485154, 296.46079269733997, 296.06614172459581, 295.73592704550703, 295.46345005882404, 295.24931262932688, 295.09322476968293],
[345.33770199869105, 346.5392280454048, 347.77005182839076, 349.03173094706352, 350.32484572200718, 351.6500050517148, 353.00781977374407, 354.399264528071, 355.82499428958067, 357.28562569758549, 358.78210283805197, 360.31530103162191, 361.88440156468499, 363.49217031265931, 365.13789903116964, 366.82390861203487, 368.54951824043007, 370.31660813514264, 372.13130119605955, 373.97719159650512, 375.8732069051859, 377.81270605751422, 379.79821127671244, 381.83106344615129],
[2525.0536642389029, 2502.9541527595861, 2481.4845762159844, 2460.6190553524316, 2440.3331130095312, 2420.6035751365348, 2401.4084868490913, 2382.7270595114273, 2364.5395038934889, 2346.8270913152069, 2329.5720138758879, 2312.7573490753157, 2296.3669916937465, 2280.3855886068009, 2264.7985807989521, 2249.592034253636, 2234.7526696139184, 2220.2678641353223, 2206.1254894962099, 2192.3140977960388, 2178.8225755287399, 2165.6404455635725, 2152.7575877172098, 2140.1643712494397]
])
fig, ax = p.subplots()
# Twin the x-axis twice to make independent y-axes.
axes = [ax, ax.twinx(), ax.twinx()]
# Make some space on the right side for the extra y-axis.
fig.subplots_adjust(right=0.75)
# Move the last y-axis spine over to the right by 20% of the width of the axes
axes[-1].spines['right'].set_position(('axes', 1.2))
# To make the border of the right-most axis visible, we need to turn the frame
# on. This hides the other plots, however, so we need to turn its fill off.
axes[-1].set_frame_on(True)
axes[-1].patch.set_visible(False)
# And finally we get to plot things...
colors = ('Green', 'Red', 'Blue')
index = [1, 2, 3]
for i, ax, color in zip(index, axes, colors):
if i == 1:
ax.plot(data[0],data[1], color=color, lw=3)
ax.set_ylabel('Battery Size (kW-hr)', color=color, fontsize = 18)
ax.set_ylim([250,400])
if i == 2:
ax.plot(data[0],data[2], color=color, ls= "--", lw=3)
ax.set_ylabel('Max Compressor Pwr Req (kW)', color=color, fontsize = 18)
ax.set_ylim([250,400])
if i == 3:
ax.plot(data[0],data[3]/3600, color=color, ls= "--", lw=3)
ax.set_ylabel('Total Mission Time (hrs)', color=color, fontsize = 18)
ax.set_ylim([0.5+(0.025),0.9375])
ax.set_yticks([0.5+.05, 0.5+(0.055*1)+.05, 0.5+(0.055*2)+.05, 0.5+(0.055*3)+.05, 0.5+(0.055*4)+.05, 0.5+(0.055*5)+.05, 0.5+(0.055*6)+.05, 0.5+(0.055*7)+.05])
ax.tick_params(axis='y', colors=color)
axes[0].set_xlabel('Max Pod Mach', fontsize=18)
#p.tick_params(axis='both', which='major', labelsize=15)
#p.ylabel('Battery', fontsize=18)
#p.title('Battery size as a product of power and mission time over varying speeds', fontsize=18)
#p.plot(data[0],data[1], label="Tube (c1MN = .65)", lw=3)
#p.xlim([0.65,0.95])
#p.ylim([0,450])
#ax2 = p.twinx()
#ax2.plot(data[0],data[3], label="Tube", lw=3)
#ax2.set_ylabel('Total time')
p.legend(loc="best")
#p.gcf().set_size_inches(11,5.5)
#p.gcf().savefig('test2png.png',dpi=130)
p.show() | apache-2.0 |
vamsirajendra/nupic | nupic/regions/ImageSensorFilters/CenterSurroundConvolution.py | 17 | 7903 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
"""
import numpy
from nupic.regions.ImageSensorFilters.Convolution import Convolution
class CenterSurroundConvolution(Convolution):
"""
Apply a bank of Gabor filters to the original image, and
return one or more images (same dimensions as the original)
containing the Gabor responses.
"""
def __init__(self,
scaleDecimation=[1],
filterDim=9,
gainConstant=1.0,
normalizationMethod='fixed',
perPlaneNormalization=False,
perPhaseNormalization=True,
postProcessingMethod='raw',
postProcessingSlope=1.0,
postProcessingCenter=0.5,
postProcessingMin=0.0,
postProcessingMax=1.0,
zeroThresholdOut=0.0,
boundaryMode='constrained',
offImagePixelValue=0,
suppressOutsideBox=True,
forceBoxContraction=False,
lobeSuppression=True):
"""
"""
Convolution.__init__(self,
scaleDecimation,
filterDim,
gainConstant,
normalizationMethod,
perPlaneNormalization,
perPhaseNormalization,
postProcessingMethod,
postProcessingSlope,
postProcessingCenter,
postProcessingMin,
postProcessingMax,
zeroThresholdOut,
boundaryMode,
offImagePixelValue,
suppressOutsideBox,
forceBoxContraction)
self._lobeSuppression = lobeSuppression
def _buildFilterBank(self):
"""Build an array of Gabor filters.
Also build a 1-D vector of filter bank indices that maps each output
location to a particular (customized) bank of gabor filters.
"""
# Make sure dimensions of our Gabor filters are odd
assert self._filterDim % 2 == 1
# Create mesh grid indices. The result will be a numpy array of
# shape (2, filterDim, filterDim).
# Then meshGrid[0] stores the row indices of the master grid,
# and meshGrid[1] stores the column indices.
lowerIndex = -(self._filterDim / 2)
upperIndex = 1 + self._filterDim / 2
meshGrid = numpy.mgrid[lowerIndex:upperIndex, lowerIndex:upperIndex]
# If we are supposed to produce only center-surround output
# (no oriented responses), then we will still go through the
# process of making a minimalist bank of 2 oriented gabor
# filters since that is needed by the center-surround filter
# generation code
orientationCount = 2
# Select the orientation sample points (in radians)
radianInterval = numpy.pi / float(orientationCount)
orientations = numpy.array(range(orientationCount), dtype=RealNumpyDType) * \
radianInterval
# Compute trigonometric functions of orientation
sinTheta = numpy.sin(orientations).reshape(orientationCount, 1, 1)
cosTheta = numpy.cos(orientations).reshape(orientationCount, 1, 1)
# Construct two filterDim X filterDim arrays containing y (row) and
# x (column) coordinates (in dimensions of pixels), respectively.
y = meshGrid[0].reshape(1, self._filterDim, self._filterDim)
x = meshGrid[1].reshape(1, self._filterDim, self._filterDim)
X = x * cosTheta - y * sinTheta
Y = x * sinTheta + y * cosTheta
# Build the exponential term
numerator = (X * X + self._aspectRatio * self._aspectRatio * Y * Y)
denominator = -2.0 * self._effectiveWidth * self._effectiveWidth
exponentialTerm = numpy.exp(numerator / denominator)
# Build the center-surround filters
expFilter = exponentialTerm[0] * exponentialTerm[orientationCount/2]
# Cubing the raw exponential component seems to give a nice
# center-surround filter
centerSurround = expFilter * expFilter * expFilter
filterBank = centerSurround[numpy.newaxis,:,:]
# Apply lobe suppression: Suppress the outer lobes of the sinusoidal
# component of the Gabor filters so as to avoid "ringing" effects in
# the Gabor response maps.
#
# We make a single lobe-suppression mask (which is directionally
# oriented.) Then we rotate this mask by each orientation and
# apply it to the pre-suppressed filter bank.
# In order to minimize discontinuities in the gradients, the
# suppression mask will be constructed as follows:
#
# y = 1 - |x|^p
#
# where:
# y = Suppression (0 for total suppression, 1 for no-suppression)
# x = position relative to center
# p = Some exponent that controls the sharpness of suppression
filterCount = filterBank.shape[0]
# New lobe suppression.
if self._lobeSuppression:
# The orientation is always vertical, so we'll locate the discrete
# filter cell where we go negative
halfFilterDim = (self._filterDim - 1) / 2
firstBadCell = None
for cellIdx in xrange(halfFilterDim, self._filterDim):
if filterBank[0, 0, cellIdx] < 0.0:
firstBadCell = cellIdx - halfFilterDim
break
if firstBadCell is not None:
radialDist = numpy.abs(X / float(halfFilterDim))
# Establish a radial distance threshold that is halfway
# between the first discrete bad cell and the second bad cell.
# This seems to give good results in practice.
distThresh = 0.5 * (radialDist[0, 0, halfFilterDim + firstBadCell] + \
radialDist[0, 0, halfFilterDim + firstBadCell + 1])
suppressTerm = (radialDist < distThresh).astype(RealNumpyDType)
if self._centerSurround:
suppressTerm = numpy.concatenate((suppressTerm,
numpy.ones((1, self._filterDim, self._filterDim),
dtype=RealNumpyDType)))
filterBank *= suppressTerm
# Normalize so that mean of each filter is zero
means = filterBank.mean(axis=2).mean(axis=1).reshape(filterCount, 1, 1)
offsets = means.repeat(self._filterDim, axis=1).repeat(self._filterDim, axis=2)
filterBank -= offsets
# Normalize so that sum of squares over each filter is one
squareSums = (filterBank * filterBank).sum(axis=2).sum(axis=1).reshape(filterCount, 1, 1)
scalars = 1.0 / numpy.sqrt(squareSums)
filterBank *= scalars
self._filterBank = (filterBank.astype(numpy.float32) * 4096.0).astype(numpy.int32)
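# --- Hedged illustration (added for clarity, not part of the original class) ---
# The smooth suppression profile y = 1 - |x|^p described in the comments above,
# evaluated on its own. The exponent value is an assumption for demonstration;
# the method above ends up applying a hard radial-distance threshold instead.
def _exampleSuppressionProfile(filterDim=9, p=4.0):
    positions = numpy.linspace(-1.0, 1.0, filterDim)  # position relative to center
    return 1.0 - numpy.abs(positions) ** p            # 1 = keep, 0 = fully suppressed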
def _calcPlaneCount(self):
"""Computes the number of responses planes for a particular Gabor
configuration.
"""
return self._orientationCount + 1
def _getNeededBufferCount(self):
"""Compute the number of allocated buffers to hold the responses.
"""
return self._orientationCount + 1
| agpl-3.0 |
dmilith/SublimeText3-dmilith | Packages/pygments/all/pygments/lexers/resource.py | 46 | 2913 | # -*- coding: utf-8 -*-
"""
pygments.lexers.resource
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for resource definition files.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Comment, String, Number, Operator, Text, \
Keyword, Name
__all__ = ['ResourceLexer']
class ResourceLexer(RegexLexer):
"""Lexer for `ICU Resource bundles
<http://userguide.icu-project.org/locale/resources>`_.
.. versionadded:: 2.0
"""
name = 'ResourceBundle'
aliases = ['resource', 'resourcebundle']
filenames = ['*.txt']
_types = (':table', ':array', ':string', ':bin', ':import', ':intvector',
':int', ':alias')
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'//.*?$', Comment),
(r'"', String, 'string'),
(r'-?\d+', Number.Integer),
(r'[,{}]', Operator),
(r'([^\s{:]+)(\s*)(%s?)' % '|'.join(_types),
bygroups(Name, Text, Keyword)),
(r'\s+', Text),
(words(_types), Keyword),
],
'string': [
(r'(\\x[0-9a-f]{2}|\\u[0-9a-f]{4}|\\U00[0-9a-f]{6}|'
r'\\[0-7]{1,3}|\\c.|\\[abtnvfre\'"?\\]|\\\{|[^"{\\])+', String),
(r'\{', String.Escape, 'msgname'),
(r'"', String, '#pop')
],
'msgname': [
(r'([^{},]+)(\s*)', bygroups(Name, String.Escape), ('#pop', 'message'))
],
'message': [
(r'\{', String.Escape, 'msgname'),
(r'\}', String.Escape, '#pop'),
(r'(,)(\s*)([a-z]+)(\s*\})',
bygroups(Operator, String.Escape, Keyword, String.Escape), '#pop'),
(r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)(offset)(\s*)(:)(\s*)(-?\d+)(\s*)',
bygroups(Operator, String.Escape, Keyword, String.Escape, Operator,
String.Escape, Operator.Word, String.Escape, Operator,
String.Escape, Number.Integer, String.Escape), 'choice'),
(r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)',
bygroups(Operator, String.Escape, Keyword, String.Escape, Operator,
String.Escape), 'choice'),
(r'\s+', String.Escape)
],
'choice': [
(r'(=|<|>|<=|>=|!=)(-?\d+)(\s*\{)',
bygroups(Operator, Number.Integer, String.Escape), 'message'),
(r'([a-z]+)(\s*\{)', bygroups(Keyword.Type, String.Escape), 'str'),
(r'\}', String.Escape, ('#pop', '#pop')),
(r'\s+', String.Escape)
],
'str': [
(r'\}', String.Escape, '#pop'),
(r'\{', String.Escape, 'msgname'),
(r'[^{}]+', String)
]
}
def analyse_text(text):
return text.startswith('root:table')
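# --- Hedged usage sketch (added for clarity, not part of the lexer module) ---
# Tokenizing a small, made-up ICU resource bundle snippet with the lexer above.
def _example_tokenize():
    sample = u'root:table { hello:string { "Hello, world!" } }'
    return list(ResourceLexer().get_tokens(sample))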
| mit |
windskyer/nova | nova/console/xvp.py | 28 | 7148 | # Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""XVP (Xenserver VNC Proxy) driver."""
import os
import signal
import jinja2
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from nova import context
from nova import db
from nova.i18n import _, _LE
from nova import paths
from nova import utils
xvp_opts = [
cfg.StrOpt('console_xvp_conf_template',
default=paths.basedir_def('nova/console/xvp.conf.template'),
help='XVP conf template'),
cfg.StrOpt('console_xvp_conf',
default='/etc/xvp.conf',
help='Generated XVP conf file'),
cfg.StrOpt('console_xvp_pid',
default='/var/run/xvp.pid',
help='XVP master process pid file'),
cfg.StrOpt('console_xvp_log',
default='/var/log/xvp.log',
help='XVP log file'),
cfg.IntOpt('console_xvp_multiplex_port',
default=5900,
min=1,
max=65535,
help='Port for XVP to multiplex VNC connections on'),
]
CONF = cfg.CONF
CONF.register_opts(xvp_opts)
CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
class XVPConsoleProxy(object):
"""Sets up XVP config, and manages XVP daemon."""
def __init__(self):
self.xvpconf_template = open(CONF.console_xvp_conf_template).read()
self.host = CONF.host # default, set by manager.
super(XVPConsoleProxy, self).__init__()
@property
def console_type(self):
return 'vnc+xvp'
def get_port(self, context):
"""Get available port for consoles that need one."""
# TODO(mdragon): implement port selection for non multiplex ports,
# we are not using that, but someone else may want
# it.
return CONF.console_xvp_multiplex_port
def setup_console(self, context, console):
"""Sets up actual proxies."""
self._rebuild_xvp_conf(context.elevated())
def teardown_console(self, context, console):
"""Tears down actual proxies."""
self._rebuild_xvp_conf(context.elevated())
def init_host(self):
"""Start up any config'ed consoles on start."""
ctxt = context.get_admin_context()
self._rebuild_xvp_conf(ctxt)
def fix_pool_password(self, password):
"""Trim password to length, and encode."""
return self._xvp_encrypt(password, is_pool_password=True)
def fix_console_password(self, password):
"""Trim password to length, and encode."""
return self._xvp_encrypt(password)
def _rebuild_xvp_conf(self, context):
LOG.debug('Rebuilding xvp conf')
pools = [pool for pool in
db.console_pool_get_all_by_host_type(context, self.host,
self.console_type)
if pool['consoles']]
if not pools:
LOG.debug('No console pools!')
self._xvp_stop()
return
conf_data = {'multiplex_port': CONF.console_xvp_multiplex_port,
'pools': pools}
tmpl_path, tmpl_file = os.path.split(CONF.console_xvp_conf_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
env.filters['pass_encode'] = self.fix_console_password
template = env.get_template(tmpl_file)
self._write_conf(template.render(conf_data))
self._xvp_restart()
def _write_conf(self, config):
try:
LOG.debug('Re-wrote %s', CONF.console_xvp_conf)
with open(CONF.console_xvp_conf, 'w') as cfile:
cfile.write(config)
except IOError:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to write configuration file"))
def _xvp_stop(self):
LOG.debug('Stopping xvp')
pid = self._xvp_pid()
if not pid:
return
try:
os.kill(pid, signal.SIGTERM)
except OSError:
# if it's already not running, no problem.
pass
def _xvp_start(self):
if self._xvp_check_running():
return
LOG.debug('Starting xvp')
try:
utils.execute('xvp',
'-p', CONF.console_xvp_pid,
'-c', CONF.console_xvp_conf,
'-l', CONF.console_xvp_log)
except processutils.ProcessExecutionError as err:
LOG.error(_LE('Error starting xvp: %s'), err)
def _xvp_restart(self):
LOG.debug('Restarting xvp')
if not self._xvp_check_running():
LOG.debug('xvp not running...')
self._xvp_start()
else:
pid = self._xvp_pid()
os.kill(pid, signal.SIGUSR1)
def _xvp_pid(self):
try:
with open(CONF.console_xvp_pid, 'r') as pidfile:
pid = int(pidfile.read())
except IOError:
return None
except ValueError:
return None
return pid
def _xvp_check_running(self):
pid = self._xvp_pid()
if not pid:
return False
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _xvp_encrypt(self, password, is_pool_password=False):
"""Call xvp to obfuscate passwords for config file.
Args:
- password: the password to encode, max 8 char for vm passwords,
and 16 chars for pool passwords. passwords will
be trimmed to max len before encoding.
- is_pool_password: True if this is the XenServer api password
False if it's a VM console password
(xvp uses different keys and max lengths for pool passwords)
Note that xvp's obfuscation should not be considered 'real' encryption.
It simply DES encrypts the passwords with static keys plainly viewable
in the xvp source code.
"""
maxlen = 8
flag = '-e'
if is_pool_password:
maxlen = 16
flag = '-x'
# xvp will blow up on passwords that are too long (mdragon)
password = password[:maxlen]
out, err = utils.execute('xvp', flag, process_input=password)
if err:
raise processutils.ProcessExecutionError(_("Failed to run xvp."))
return out.strip()
| gpl-2.0 |
ermin-sakic/common-open-research-emulator-CORE | coreemu-read-only/daemon/core/emane/rfpipe.py | 11 | 4945 | #
# CORE
# Copyright (c)2010-2014 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Jeff Ahrenholz <[email protected]>
# Harry Bullen <[email protected]>
#
'''
rfpipe.py: EMANE RF-PIPE model for CORE
'''
import sys
import string
try:
from emanesh.events import EventService
except:
pass
from core.api import coreapi
from core.constants import *
from emane import EmaneModel
from universal import EmaneUniversalModel
class EmaneRfPipeModel(EmaneModel):
def __init__(self, session, objid = None, verbose = False):
EmaneModel.__init__(self, session, objid, verbose)
# model name
_name = "emane_rfpipe"
if 'EventService' in globals():
xml_path = '/usr/share/emane/xml/models/mac/rfpipe'
else:
xml_path = "/usr/share/emane/models/rfpipe/xml"
# configuration parameters are
# ( 'name', 'type', 'default', 'possible-value-list', 'caption')
# MAC parameters
_confmatrix_mac_base = [
("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
'True,False', 'enable promiscuous mode'),
("datarate", coreapi.CONF_DATA_TYPE_UINT32, '1M',
'', 'data rate (bps)'),
("flowcontrolenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
'On,Off', 'enable traffic flow control'),
("flowcontroltokens", coreapi.CONF_DATA_TYPE_UINT16, '10',
'', 'number of flow control tokens'),
("pcrcurveuri", coreapi.CONF_DATA_TYPE_STRING,
'%s/rfpipepcr.xml' % xml_path,
'', 'SINR/PCR curve file'),
]
_confmatrix_mac_081 = [
("jitter", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'', 'transmission jitter (usec)'),
("delay", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'', 'transmission delay (usec)'),
("transmissioncontrolmap", coreapi.CONF_DATA_TYPE_STRING, '',
'', 'tx control map (nem:rate:freq:tx_dBm)'),
("enabletighttiming", coreapi.CONF_DATA_TYPE_BOOL, '0',
'On,Off', 'enable tight timing for pkt delay'),
]
_confmatrix_mac_091 = [
("jitter", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'', 'transmission jitter (sec)'),
("delay", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
'', 'transmission delay (sec)'),
]
if 'EventService' in globals():
_confmatrix_mac = _confmatrix_mac_base + _confmatrix_mac_091
else:
_confmatrix_mac = _confmatrix_mac_base + _confmatrix_mac_081
# PHY parameters from Universal PHY
_confmatrix_phy = EmaneUniversalModel._confmatrix
_confmatrix = _confmatrix_mac + _confmatrix_phy
# value groupings
_confgroups = "RF-PIPE MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" \
% ( len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix))
def buildnemxmlfiles(self, e, ifc):
''' Build the necessary nem, mac, and phy XMLs in the given path.
If an individual NEM has a nonstandard config, we need to build
that file also. Otherwise the WLAN-wide nXXemane_rfpipenem.xml,
nXXemane_rfpipemac.xml, nXXemane_rfpipephy.xml are used.
'''
values = e.getifcconfig(self.objid, self._name,
self.getdefaultvalues(), ifc)
if values is None:
return
nemdoc = e.xmldoc("nem")
nem = nemdoc.getElementsByTagName("nem").pop()
nem.setAttribute("name", "RF-PIPE NEM")
e.appendtransporttonem(nemdoc, nem, self.objid, ifc)
mactag = nemdoc.createElement("mac")
mactag.setAttribute("definition", self.macxmlname(ifc))
nem.appendChild(mactag)
phytag = nemdoc.createElement("phy")
phytag.setAttribute("definition", self.phyxmlname(ifc))
nem.appendChild(phytag)
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
names = list(self.getnames())
macnames = names[:len(self._confmatrix_mac)]
phynames = names[len(self._confmatrix_mac):]
macdoc = e.xmldoc("mac")
mac = macdoc.getElementsByTagName("mac").pop()
mac.setAttribute("name", "RF-PIPE MAC")
mac.setAttribute("library", "rfpipemaclayer")
if e.version < e.EMANE091 and \
self.valueof("transmissioncontrolmap", values) is "":
macnames.remove("transmissioncontrolmap")
# EMANE 0.7.4 support
if e.version == e.EMANE074:
# convert datarate from bps to kbps
i = names.index('datarate')
values = list(values)
values[i] = self.emane074_fixup(values[i], 1000)
# append MAC options to macdoc
map( lambda n: mac.appendChild(e.xmlparam(macdoc, n, \
self.valueof(n, values))), macnames)
e.xmlwrite(macdoc, self.macxmlname(ifc))
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
e.xmlwrite(phydoc, self.phyxmlname(ifc))
| bsd-3-clause |
madAndroid/jenkins-job-builder | jenkins_jobs/local_yaml.py | 11 | 11002 | #!/usr/bin/env python
# Copyright (C) 2013 Hewlett-Packard.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Provides local yaml parsing classes and extends the yaml module
"""Custom application specific yamls tags are supported to provide
enhancements when reading yaml configuration.
These allow inclusion of arbitrary files as a method of having blocks of data
managed separately to the yaml job configurations. A specific usage of this is
inlining scripts contained in separate files, although such tags may also be
used to simplify usage of macros or job templates.
The tag ``!include:`` will treat the following string as a file which should be
parsed as yaml configuration data.
Example:
.. literalinclude:: /../../tests/localyaml/fixtures/include001.yaml
contents of include001.yaml.inc:
.. literalinclude:: /../../tests/yamlparser/fixtures/include001.yaml.inc
The tag ``!include-raw:`` will treat the given string or list of strings as
filenames to be opened as one or more data blobs, which should be read into
the calling yaml construct without any further parsing. Any data in a file
included through this tag, will be treated as string data.
Examples:
.. literalinclude:: /../../tests/localyaml/fixtures/include-raw001.yaml
contents of include-raw001-hello-world.sh:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw001-hello-world.sh
contents of include-raw001-vars.sh:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw001-vars.sh
using a list of files:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw-multi001.yaml
The tag ``!include-raw-escape:`` treats the given string or list of strings as
filenames to be opened as one or more data blobs, which should be escaped
before being read in as string data. This allows job-templates to use this tag
to include scripts from files without needing to escape braces in the original
file.
Examples:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw-escaped001.yaml
contents of include-raw001-hello-world.sh:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw001-hello-world.sh
contents of include-raw001-vars.sh:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw001-vars.sh
using a list of files:
.. literalinclude::
/../../tests/localyaml/fixtures/include-raw-escaped-multi001.yaml
For all the multi file includes, the files are simply appended using a newline
character.
"""
import functools
import io
import logging
import re
import os
import yaml
from yaml.constructor import BaseConstructor
from yaml import YAMLObject
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
logger = logging.getLogger(__name__)
class OrderedConstructor(BaseConstructor):
"""The default constructor class for PyYAML loading uses standard python
dictionaries which can have randomized ordering enabled (default in
CPython from version 3.3). The order of the XML elements being outputted
is both important for tests and for ensuring predictable generation based
on the source. This subclass overrides this behaviour to ensure that all
dict's created make use of OrderedDict to have iteration of keys to always
follow the order in which the keys were inserted/created.
"""
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(
None, None,
'expected a mapping node, but found %s' % node.id,
node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=False)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError(
'while constructing a mapping', node.start_mark,
'found unacceptable key (%s)' % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=False)
mapping[key] = value
data.update(mapping)
class LocalAnchorLoader(yaml.Loader):
"""Subclass for yaml.Loader which keeps Alias between calls"""
anchors = {}
def __init__(self, *args, **kwargs):
super(LocalAnchorLoader, self).__init__(*args, **kwargs)
self.anchors = LocalAnchorLoader.anchors
@classmethod
def reset_anchors(cls):
cls.anchors = {}
# override the default composer to skip resetting the anchors at the
# end of the current document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
return node
class LocalLoader(OrderedConstructor, LocalAnchorLoader):
"""Subclass for yaml.Loader which handles storing the search_path and
escape_callback functions for use by the custom YAML objects to find files
and escape the content where required.
The constructor accepts a list of search paths to look under for the given
file following each tag, taking the first match found. The search path by
default will include the same directory as the yaml file and the current
working directory.
Loading::
# use the load function provided in this module
import local_yaml
data = local_yaml.load(io.open(fn, 'r', encoding='utf-8'))
# Loading by providing the alternate class to the default yaml load
from local_yaml import LocalLoader
data = yaml.load(io.open(fn, 'r', encoding='utf-8'), LocalLoader)
# Loading with a search path
from local_yaml import LocalLoader
import functools
data = yaml.load(io.open(fn, 'r', encoding='utf-8'),
functools.partial(LocalLoader, search_path=['path']))
"""
def __init__(self, *args, **kwargs):
# make sure to pop off any local settings before passing to
# the parent constructor as any unknown args may cause errors.
self.search_path = list()
if 'search_path' in kwargs:
for p in kwargs.pop('search_path'):
logger.debug("Adding '{0}' to search path for include tags"
.format(p))
self.search_path.append(os.path.normpath(p))
if 'escape_callback' in kwargs:
self.escape_callback = kwargs.pop('escape_callback')
else:
self.escape_callback = self._escape
super(LocalLoader, self).__init__(*args, **kwargs)
# constructor to preserve order of maps and ensure that the order of
# keys returned is consistent across multiple python versions
self.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
type(self).construct_yaml_map)
if hasattr(self.stream, 'name'):
self.search_path.append(os.path.normpath(
os.path.dirname(self.stream.name)))
self.search_path.append(os.path.normpath(os.path.curdir))
def _escape(self, data):
return re.sub(r'({|})', r'\1\1', data)
class BaseYAMLObject(YAMLObject):
yaml_loader = LocalLoader
yaml_dumper = yaml.Dumper
class YamlInclude(BaseYAMLObject):
yaml_tag = u'!include:'
@classmethod
def _find_file(cls, filename, search_path):
for dirname in search_path:
candidate = os.path.expanduser(os.path.join(dirname, filename))
if os.path.isfile(candidate):
logger.info("Including file '{0}' from path '{1}'"
.format(filename, dirname))
return candidate
return filename
@classmethod
def _open_file(cls, loader, scalar_node):
filename = cls._find_file(loader.construct_yaml_str(scalar_node),
loader.search_path)
try:
with io.open(filename, 'r', encoding='utf-8') as f:
return f.read()
except:
logger.error("Failed to include file using search path: '{0}'"
.format(':'.join(loader.search_path)))
raise
@classmethod
def _from_file(cls, loader, node):
data = yaml.load(cls._open_file(loader, node),
functools.partial(cls.yaml_loader,
search_path=loader.search_path))
return data
@classmethod
def from_yaml(cls, loader, node):
if isinstance(node, yaml.ScalarNode):
return cls._from_file(loader, node)
elif isinstance(node, yaml.SequenceNode):
return u'\n'.join(cls._from_file(loader, scalar_node)
for scalar_node in node.value)
else:
raise yaml.constructor.ConstructorError(
None, None, "expected either a sequence or scalar node, but "
"found %s" % node.id, node.start_mark)
class YamlIncludeRaw(YamlInclude):
yaml_tag = u'!include-raw:'
@classmethod
def _from_file(cls, loader, node):
return cls._open_file(loader, node)
class YamlIncludeRawEscape(YamlIncludeRaw):
yaml_tag = u'!include-raw-escape:'
@classmethod
def from_yaml(cls, loader, node):
return loader.escape_callback(YamlIncludeRaw.from_yaml(loader, node))
class DeprecatedTag(BaseYAMLObject):
@classmethod
def from_yaml(cls, loader, node):
logger.warn("tag '%s' is deprecated, switch to using '%s'",
cls.yaml_tag, cls._new.yaml_tag)
return cls._new.from_yaml(loader, node)
class YamlIncludeDeprecated(DeprecatedTag):
yaml_tag = u'!include'
_new = YamlInclude
class YamlIncludeRawDeprecated(DeprecatedTag):
yaml_tag = u'!include-raw'
_new = YamlIncludeRaw
class YamlIncludeRawEscapeDeprecated(DeprecatedTag):
yaml_tag = u'!include-raw-escape'
_new = YamlIncludeRawEscape
def load(stream, **kwargs):
LocalAnchorLoader.reset_anchors()
return yaml.load(stream, functools.partial(LocalLoader, **kwargs))
| apache-2.0 |
kalugny/pypachy | src/python_pachyderm/proto/v2/identity/identity_pb2.py | 1 | 50757 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: python_pachyderm/proto/v2/identity/identity.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='python_pachyderm/proto/v2/identity/identity.proto',
package='identity_v2',
syntax='proto3',
serialized_options=b'Z.github.com/pachyderm/pachyderm/v2/src/identity',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n1python_pachyderm/proto/v2/identity/identity.proto\x12\x0bidentity_v2\x1a\x1fgoogle/protobuf/timestamp.proto\"M\n\x04User\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x36\n\x12last_authenticated\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"?\n\x14IdentityServerConfig\x12\x0e\n\x06issuer\x18\x01 \x01(\t\x12\x17\n\x0fid_token_expiry\x18\x02 \x01(\t\"S\n\x1eSetIdentityServerConfigRequest\x12\x31\n\x06\x63onfig\x18\x01 \x01(\x0b\x32!.identity_v2.IdentityServerConfig\"!\n\x1fSetIdentityServerConfigResponse\" \n\x1eGetIdentityServerConfigRequest\"T\n\x1fGetIdentityServerConfigResponse\x12\x31\n\x06\x63onfig\x18\x01 \x01(\x0b\x32!.identity_v2.IdentityServerConfig\"a\n\x0cIDPConnector\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04type\x18\x03 \x01(\t\x12\x15\n\rconfigVersion\x18\x04 \x01(\x03\x12\x12\n\njsonConfig\x18\x05 \x01(\t\"I\n\x19\x43reateIDPConnectorRequest\x12,\n\tconnector\x18\x01 \x01(\x0b\x32\x19.identity_v2.IDPConnector\"\x1c\n\x1a\x43reateIDPConnectorResponse\"I\n\x19UpdateIDPConnectorRequest\x12,\n\tconnector\x18\x01 \x01(\x0b\x32\x19.identity_v2.IDPConnector\"\x1c\n\x1aUpdateIDPConnectorResponse\"\x1a\n\x18ListIDPConnectorsRequest\"J\n\x19ListIDPConnectorsResponse\x12-\n\nconnectors\x18\x01 \x03(\x0b\x32\x19.identity_v2.IDPConnector\"$\n\x16GetIDPConnectorRequest\x12\n\n\x02id\x18\x01 \x01(\t\"G\n\x17GetIDPConnectorResponse\x12,\n\tconnector\x18\x01 \x01(\x0b\x32\x19.identity_v2.IDPConnector\"\'\n\x19\x44\x65leteIDPConnectorRequest\x12\n\n\x02id\x18\x01 \x01(\t\"\x1c\n\x1a\x44\x65leteIDPConnectorResponse\"d\n\nOIDCClient\x12\n\n\x02id\x18\x01 \x01(\t\x12\x15\n\rredirect_uris\x18\x02 \x03(\t\x12\x15\n\rtrusted_peers\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0e\n\x06secret\x18\x05 \x01(\t\"B\n\x17\x43reateOIDCClientRequest\x12\'\n\x06\x63lient\x18\x01 \x01(\x0b\x32\x17.identity_v2.OIDCClient\"C\n\x18\x43reateOIDCClientResponse\x12\'\n\x06\x63lient\x18\x01 \x01(\x0b\x32\x17.identity_v2.OIDCClient\"\"\n\x14GetOIDCClientRequest\x12\n\n\x02id\x18\x01 \x01(\t\"@\n\x15GetOIDCClientResponse\x12\'\n\x06\x63lient\x18\x01 \x01(\x0b\x32\x17.identity_v2.OIDCClient\"\x18\n\x16ListOIDCClientsRequest\"C\n\x17ListOIDCClientsResponse\x12(\n\x07\x63lients\x18\x01 \x03(\x0b\x32\x17.identity_v2.OIDCClient\"B\n\x17UpdateOIDCClientRequest\x12\'\n\x06\x63lient\x18\x01 \x01(\x0b\x32\x17.identity_v2.OIDCClient\"\x1a\n\x18UpdateOIDCClientResponse\"%\n\x17\x44\x65leteOIDCClientRequest\x12\n\n\x02id\x18\x01 
\x01(\t\"\x1a\n\x18\x44\x65leteOIDCClientResponse\"\x12\n\x10\x44\x65leteAllRequest\"\x13\n\x11\x44\x65leteAllResponse2\xa7\n\n\x03\x41PI\x12v\n\x17SetIdentityServerConfig\x12+.identity_v2.SetIdentityServerConfigRequest\x1a,.identity_v2.SetIdentityServerConfigResponse\"\x00\x12v\n\x17GetIdentityServerConfig\x12+.identity_v2.GetIdentityServerConfigRequest\x1a,.identity_v2.GetIdentityServerConfigResponse\"\x00\x12g\n\x12\x43reateIDPConnector\x12&.identity_v2.CreateIDPConnectorRequest\x1a\'.identity_v2.CreateIDPConnectorResponse\"\x00\x12g\n\x12UpdateIDPConnector\x12&.identity_v2.UpdateIDPConnectorRequest\x1a\'.identity_v2.UpdateIDPConnectorResponse\"\x00\x12\x64\n\x11ListIDPConnectors\x12%.identity_v2.ListIDPConnectorsRequest\x1a&.identity_v2.ListIDPConnectorsResponse\"\x00\x12^\n\x0fGetIDPConnector\x12#.identity_v2.GetIDPConnectorRequest\x1a$.identity_v2.GetIDPConnectorResponse\"\x00\x12g\n\x12\x44\x65leteIDPConnector\x12&.identity_v2.DeleteIDPConnectorRequest\x1a\'.identity_v2.DeleteIDPConnectorResponse\"\x00\x12\x61\n\x10\x43reateOIDCClient\x12$.identity_v2.CreateOIDCClientRequest\x1a%.identity_v2.CreateOIDCClientResponse\"\x00\x12\x61\n\x10UpdateOIDCClient\x12$.identity_v2.UpdateOIDCClientRequest\x1a%.identity_v2.UpdateOIDCClientResponse\"\x00\x12X\n\rGetOIDCClient\x12!.identity_v2.GetOIDCClientRequest\x1a\".identity_v2.GetOIDCClientResponse\"\x00\x12^\n\x0fListOIDCClients\x12#.identity_v2.ListOIDCClientsRequest\x1a$.identity_v2.ListOIDCClientsResponse\"\x00\x12\x61\n\x10\x44\x65leteOIDCClient\x12$.identity_v2.DeleteOIDCClientRequest\x1a%.identity_v2.DeleteOIDCClientResponse\"\x00\x12L\n\tDeleteAll\x12\x1d.identity_v2.DeleteAllRequest\x1a\x1e.identity_v2.DeleteAllResponse\"\x00\x42\x30Z.github.com/pachyderm/pachyderm/v2/src/identityb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_USER = _descriptor.Descriptor(
name='User',
full_name='identity_v2.User',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='email', full_name='identity_v2.User.email', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='last_authenticated', full_name='identity_v2.User.last_authenticated', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=99,
serialized_end=176,
)
_IDENTITYSERVERCONFIG = _descriptor.Descriptor(
name='IdentityServerConfig',
full_name='identity_v2.IdentityServerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='issuer', full_name='identity_v2.IdentityServerConfig.issuer', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='id_token_expiry', full_name='identity_v2.IdentityServerConfig.id_token_expiry', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=178,
serialized_end=241,
)
_SETIDENTITYSERVERCONFIGREQUEST = _descriptor.Descriptor(
name='SetIdentityServerConfigRequest',
full_name='identity_v2.SetIdentityServerConfigRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='config', full_name='identity_v2.SetIdentityServerConfigRequest.config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=243,
serialized_end=326,
)
_SETIDENTITYSERVERCONFIGRESPONSE = _descriptor.Descriptor(
name='SetIdentityServerConfigResponse',
full_name='identity_v2.SetIdentityServerConfigResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=328,
serialized_end=361,
)
_GETIDENTITYSERVERCONFIGREQUEST = _descriptor.Descriptor(
name='GetIdentityServerConfigRequest',
full_name='identity_v2.GetIdentityServerConfigRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=363,
serialized_end=395,
)
_GETIDENTITYSERVERCONFIGRESPONSE = _descriptor.Descriptor(
name='GetIdentityServerConfigResponse',
full_name='identity_v2.GetIdentityServerConfigResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='config', full_name='identity_v2.GetIdentityServerConfigResponse.config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=397,
serialized_end=481,
)
_IDPCONNECTOR = _descriptor.Descriptor(
name='IDPConnector',
full_name='identity_v2.IDPConnector',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='identity_v2.IDPConnector.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='identity_v2.IDPConnector.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='identity_v2.IDPConnector.type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='configVersion', full_name='identity_v2.IDPConnector.configVersion', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='jsonConfig', full_name='identity_v2.IDPConnector.jsonConfig', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=483,
serialized_end=580,
)
_CREATEIDPCONNECTORREQUEST = _descriptor.Descriptor(
name='CreateIDPConnectorRequest',
full_name='identity_v2.CreateIDPConnectorRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='connector', full_name='identity_v2.CreateIDPConnectorRequest.connector', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=582,
serialized_end=655,
)
_CREATEIDPCONNECTORRESPONSE = _descriptor.Descriptor(
name='CreateIDPConnectorResponse',
full_name='identity_v2.CreateIDPConnectorResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=657,
serialized_end=685,
)
_UPDATEIDPCONNECTORREQUEST = _descriptor.Descriptor(
name='UpdateIDPConnectorRequest',
full_name='identity_v2.UpdateIDPConnectorRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='connector', full_name='identity_v2.UpdateIDPConnectorRequest.connector', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=687,
serialized_end=760,
)
_UPDATEIDPCONNECTORRESPONSE = _descriptor.Descriptor(
name='UpdateIDPConnectorResponse',
full_name='identity_v2.UpdateIDPConnectorResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=762,
serialized_end=790,
)
_LISTIDPCONNECTORSREQUEST = _descriptor.Descriptor(
name='ListIDPConnectorsRequest',
full_name='identity_v2.ListIDPConnectorsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=792,
serialized_end=818,
)
_LISTIDPCONNECTORSRESPONSE = _descriptor.Descriptor(
name='ListIDPConnectorsResponse',
full_name='identity_v2.ListIDPConnectorsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='connectors', full_name='identity_v2.ListIDPConnectorsResponse.connectors', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=820,
serialized_end=894,
)
_GETIDPCONNECTORREQUEST = _descriptor.Descriptor(
name='GetIDPConnectorRequest',
full_name='identity_v2.GetIDPConnectorRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='identity_v2.GetIDPConnectorRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=896,
serialized_end=932,
)
_GETIDPCONNECTORRESPONSE = _descriptor.Descriptor(
name='GetIDPConnectorResponse',
full_name='identity_v2.GetIDPConnectorResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='connector', full_name='identity_v2.GetIDPConnectorResponse.connector', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=934,
serialized_end=1005,
)
_DELETEIDPCONNECTORREQUEST = _descriptor.Descriptor(
name='DeleteIDPConnectorRequest',
full_name='identity_v2.DeleteIDPConnectorRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='identity_v2.DeleteIDPConnectorRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1007,
serialized_end=1046,
)
_DELETEIDPCONNECTORRESPONSE = _descriptor.Descriptor(
name='DeleteIDPConnectorResponse',
full_name='identity_v2.DeleteIDPConnectorResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1048,
serialized_end=1076,
)
_OIDCCLIENT = _descriptor.Descriptor(
name='OIDCClient',
full_name='identity_v2.OIDCClient',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='identity_v2.OIDCClient.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='redirect_uris', full_name='identity_v2.OIDCClient.redirect_uris', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trusted_peers', full_name='identity_v2.OIDCClient.trusted_peers', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='identity_v2.OIDCClient.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='secret', full_name='identity_v2.OIDCClient.secret', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1078,
serialized_end=1178,
)
_CREATEOIDCCLIENTREQUEST = _descriptor.Descriptor(
name='CreateOIDCClientRequest',
full_name='identity_v2.CreateOIDCClientRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='client', full_name='identity_v2.CreateOIDCClientRequest.client', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1180,
serialized_end=1246,
)
_CREATEOIDCCLIENTRESPONSE = _descriptor.Descriptor(
name='CreateOIDCClientResponse',
full_name='identity_v2.CreateOIDCClientResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='client', full_name='identity_v2.CreateOIDCClientResponse.client', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1248,
serialized_end=1315,
)
_GETOIDCCLIENTREQUEST = _descriptor.Descriptor(
name='GetOIDCClientRequest',
full_name='identity_v2.GetOIDCClientRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='identity_v2.GetOIDCClientRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1317,
serialized_end=1351,
)
_GETOIDCCLIENTRESPONSE = _descriptor.Descriptor(
name='GetOIDCClientResponse',
full_name='identity_v2.GetOIDCClientResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='client', full_name='identity_v2.GetOIDCClientResponse.client', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1353,
serialized_end=1417,
)
_LISTOIDCCLIENTSREQUEST = _descriptor.Descriptor(
name='ListOIDCClientsRequest',
full_name='identity_v2.ListOIDCClientsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1419,
serialized_end=1443,
)
_LISTOIDCCLIENTSRESPONSE = _descriptor.Descriptor(
name='ListOIDCClientsResponse',
full_name='identity_v2.ListOIDCClientsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='clients', full_name='identity_v2.ListOIDCClientsResponse.clients', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1445,
serialized_end=1512,
)
_UPDATEOIDCCLIENTREQUEST = _descriptor.Descriptor(
name='UpdateOIDCClientRequest',
full_name='identity_v2.UpdateOIDCClientRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='client', full_name='identity_v2.UpdateOIDCClientRequest.client', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1514,
serialized_end=1580,
)
_UPDATEOIDCCLIENTRESPONSE = _descriptor.Descriptor(
name='UpdateOIDCClientResponse',
full_name='identity_v2.UpdateOIDCClientResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1582,
serialized_end=1608,
)
_DELETEOIDCCLIENTREQUEST = _descriptor.Descriptor(
name='DeleteOIDCClientRequest',
full_name='identity_v2.DeleteOIDCClientRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='identity_v2.DeleteOIDCClientRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1610,
serialized_end=1647,
)
_DELETEOIDCCLIENTRESPONSE = _descriptor.Descriptor(
name='DeleteOIDCClientResponse',
full_name='identity_v2.DeleteOIDCClientResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1649,
serialized_end=1675,
)
_DELETEALLREQUEST = _descriptor.Descriptor(
name='DeleteAllRequest',
full_name='identity_v2.DeleteAllRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1677,
serialized_end=1695,
)
_DELETEALLRESPONSE = _descriptor.Descriptor(
name='DeleteAllResponse',
full_name='identity_v2.DeleteAllResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1697,
serialized_end=1716,
)
_USER.fields_by_name['last_authenticated'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SETIDENTITYSERVERCONFIGREQUEST.fields_by_name['config'].message_type = _IDENTITYSERVERCONFIG
_GETIDENTITYSERVERCONFIGRESPONSE.fields_by_name['config'].message_type = _IDENTITYSERVERCONFIG
_CREATEIDPCONNECTORREQUEST.fields_by_name['connector'].message_type = _IDPCONNECTOR
_UPDATEIDPCONNECTORREQUEST.fields_by_name['connector'].message_type = _IDPCONNECTOR
_LISTIDPCONNECTORSRESPONSE.fields_by_name['connectors'].message_type = _IDPCONNECTOR
_GETIDPCONNECTORRESPONSE.fields_by_name['connector'].message_type = _IDPCONNECTOR
_CREATEOIDCCLIENTREQUEST.fields_by_name['client'].message_type = _OIDCCLIENT
_CREATEOIDCCLIENTRESPONSE.fields_by_name['client'].message_type = _OIDCCLIENT
_GETOIDCCLIENTRESPONSE.fields_by_name['client'].message_type = _OIDCCLIENT
_LISTOIDCCLIENTSRESPONSE.fields_by_name['clients'].message_type = _OIDCCLIENT
_UPDATEOIDCCLIENTREQUEST.fields_by_name['client'].message_type = _OIDCCLIENT
DESCRIPTOR.message_types_by_name['User'] = _USER
DESCRIPTOR.message_types_by_name['IdentityServerConfig'] = _IDENTITYSERVERCONFIG
DESCRIPTOR.message_types_by_name['SetIdentityServerConfigRequest'] = _SETIDENTITYSERVERCONFIGREQUEST
DESCRIPTOR.message_types_by_name['SetIdentityServerConfigResponse'] = _SETIDENTITYSERVERCONFIGRESPONSE
DESCRIPTOR.message_types_by_name['GetIdentityServerConfigRequest'] = _GETIDENTITYSERVERCONFIGREQUEST
DESCRIPTOR.message_types_by_name['GetIdentityServerConfigResponse'] = _GETIDENTITYSERVERCONFIGRESPONSE
DESCRIPTOR.message_types_by_name['IDPConnector'] = _IDPCONNECTOR
DESCRIPTOR.message_types_by_name['CreateIDPConnectorRequest'] = _CREATEIDPCONNECTORREQUEST
DESCRIPTOR.message_types_by_name['CreateIDPConnectorResponse'] = _CREATEIDPCONNECTORRESPONSE
DESCRIPTOR.message_types_by_name['UpdateIDPConnectorRequest'] = _UPDATEIDPCONNECTORREQUEST
DESCRIPTOR.message_types_by_name['UpdateIDPConnectorResponse'] = _UPDATEIDPCONNECTORRESPONSE
DESCRIPTOR.message_types_by_name['ListIDPConnectorsRequest'] = _LISTIDPCONNECTORSREQUEST
DESCRIPTOR.message_types_by_name['ListIDPConnectorsResponse'] = _LISTIDPCONNECTORSRESPONSE
DESCRIPTOR.message_types_by_name['GetIDPConnectorRequest'] = _GETIDPCONNECTORREQUEST
DESCRIPTOR.message_types_by_name['GetIDPConnectorResponse'] = _GETIDPCONNECTORRESPONSE
DESCRIPTOR.message_types_by_name['DeleteIDPConnectorRequest'] = _DELETEIDPCONNECTORREQUEST
DESCRIPTOR.message_types_by_name['DeleteIDPConnectorResponse'] = _DELETEIDPCONNECTORRESPONSE
DESCRIPTOR.message_types_by_name['OIDCClient'] = _OIDCCLIENT
DESCRIPTOR.message_types_by_name['CreateOIDCClientRequest'] = _CREATEOIDCCLIENTREQUEST
DESCRIPTOR.message_types_by_name['CreateOIDCClientResponse'] = _CREATEOIDCCLIENTRESPONSE
DESCRIPTOR.message_types_by_name['GetOIDCClientRequest'] = _GETOIDCCLIENTREQUEST
DESCRIPTOR.message_types_by_name['GetOIDCClientResponse'] = _GETOIDCCLIENTRESPONSE
DESCRIPTOR.message_types_by_name['ListOIDCClientsRequest'] = _LISTOIDCCLIENTSREQUEST
DESCRIPTOR.message_types_by_name['ListOIDCClientsResponse'] = _LISTOIDCCLIENTSRESPONSE
DESCRIPTOR.message_types_by_name['UpdateOIDCClientRequest'] = _UPDATEOIDCCLIENTREQUEST
DESCRIPTOR.message_types_by_name['UpdateOIDCClientResponse'] = _UPDATEOIDCCLIENTRESPONSE
DESCRIPTOR.message_types_by_name['DeleteOIDCClientRequest'] = _DELETEOIDCCLIENTREQUEST
DESCRIPTOR.message_types_by_name['DeleteOIDCClientResponse'] = _DELETEOIDCCLIENTRESPONSE
DESCRIPTOR.message_types_by_name['DeleteAllRequest'] = _DELETEALLREQUEST
DESCRIPTOR.message_types_by_name['DeleteAllResponse'] = _DELETEALLRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
User = _reflection.GeneratedProtocolMessageType('User', (_message.Message,), {
'DESCRIPTOR' : _USER,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.User)
})
_sym_db.RegisterMessage(User)
IdentityServerConfig = _reflection.GeneratedProtocolMessageType('IdentityServerConfig', (_message.Message,), {
'DESCRIPTOR' : _IDENTITYSERVERCONFIG,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.IdentityServerConfig)
})
_sym_db.RegisterMessage(IdentityServerConfig)
SetIdentityServerConfigRequest = _reflection.GeneratedProtocolMessageType('SetIdentityServerConfigRequest', (_message.Message,), {
'DESCRIPTOR' : _SETIDENTITYSERVERCONFIGREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.SetIdentityServerConfigRequest)
})
_sym_db.RegisterMessage(SetIdentityServerConfigRequest)
SetIdentityServerConfigResponse = _reflection.GeneratedProtocolMessageType('SetIdentityServerConfigResponse', (_message.Message,), {
'DESCRIPTOR' : _SETIDENTITYSERVERCONFIGRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.SetIdentityServerConfigResponse)
})
_sym_db.RegisterMessage(SetIdentityServerConfigResponse)
GetIdentityServerConfigRequest = _reflection.GeneratedProtocolMessageType('GetIdentityServerConfigRequest', (_message.Message,), {
'DESCRIPTOR' : _GETIDENTITYSERVERCONFIGREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.GetIdentityServerConfigRequest)
})
_sym_db.RegisterMessage(GetIdentityServerConfigRequest)
GetIdentityServerConfigResponse = _reflection.GeneratedProtocolMessageType('GetIdentityServerConfigResponse', (_message.Message,), {
'DESCRIPTOR' : _GETIDENTITYSERVERCONFIGRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.GetIdentityServerConfigResponse)
})
_sym_db.RegisterMessage(GetIdentityServerConfigResponse)
IDPConnector = _reflection.GeneratedProtocolMessageType('IDPConnector', (_message.Message,), {
'DESCRIPTOR' : _IDPCONNECTOR,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.IDPConnector)
})
_sym_db.RegisterMessage(IDPConnector)
CreateIDPConnectorRequest = _reflection.GeneratedProtocolMessageType('CreateIDPConnectorRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEIDPCONNECTORREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.CreateIDPConnectorRequest)
})
_sym_db.RegisterMessage(CreateIDPConnectorRequest)
CreateIDPConnectorResponse = _reflection.GeneratedProtocolMessageType('CreateIDPConnectorResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATEIDPCONNECTORRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.CreateIDPConnectorResponse)
})
_sym_db.RegisterMessage(CreateIDPConnectorResponse)
UpdateIDPConnectorRequest = _reflection.GeneratedProtocolMessageType('UpdateIDPConnectorRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEIDPCONNECTORREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.UpdateIDPConnectorRequest)
})
_sym_db.RegisterMessage(UpdateIDPConnectorRequest)
UpdateIDPConnectorResponse = _reflection.GeneratedProtocolMessageType('UpdateIDPConnectorResponse', (_message.Message,), {
'DESCRIPTOR' : _UPDATEIDPCONNECTORRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.UpdateIDPConnectorResponse)
})
_sym_db.RegisterMessage(UpdateIDPConnectorResponse)
ListIDPConnectorsRequest = _reflection.GeneratedProtocolMessageType('ListIDPConnectorsRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTIDPCONNECTORSREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.ListIDPConnectorsRequest)
})
_sym_db.RegisterMessage(ListIDPConnectorsRequest)
ListIDPConnectorsResponse = _reflection.GeneratedProtocolMessageType('ListIDPConnectorsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTIDPCONNECTORSRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.ListIDPConnectorsResponse)
})
_sym_db.RegisterMessage(ListIDPConnectorsResponse)
GetIDPConnectorRequest = _reflection.GeneratedProtocolMessageType('GetIDPConnectorRequest', (_message.Message,), {
'DESCRIPTOR' : _GETIDPCONNECTORREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.GetIDPConnectorRequest)
})
_sym_db.RegisterMessage(GetIDPConnectorRequest)
GetIDPConnectorResponse = _reflection.GeneratedProtocolMessageType('GetIDPConnectorResponse', (_message.Message,), {
'DESCRIPTOR' : _GETIDPCONNECTORRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.GetIDPConnectorResponse)
})
_sym_db.RegisterMessage(GetIDPConnectorResponse)
DeleteIDPConnectorRequest = _reflection.GeneratedProtocolMessageType('DeleteIDPConnectorRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEIDPCONNECTORREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.DeleteIDPConnectorRequest)
})
_sym_db.RegisterMessage(DeleteIDPConnectorRequest)
DeleteIDPConnectorResponse = _reflection.GeneratedProtocolMessageType('DeleteIDPConnectorResponse', (_message.Message,), {
'DESCRIPTOR' : _DELETEIDPCONNECTORRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.DeleteIDPConnectorResponse)
})
_sym_db.RegisterMessage(DeleteIDPConnectorResponse)
OIDCClient = _reflection.GeneratedProtocolMessageType('OIDCClient', (_message.Message,), {
'DESCRIPTOR' : _OIDCCLIENT,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.OIDCClient)
})
_sym_db.RegisterMessage(OIDCClient)
CreateOIDCClientRequest = _reflection.GeneratedProtocolMessageType('CreateOIDCClientRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEOIDCCLIENTREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.CreateOIDCClientRequest)
})
_sym_db.RegisterMessage(CreateOIDCClientRequest)
CreateOIDCClientResponse = _reflection.GeneratedProtocolMessageType('CreateOIDCClientResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATEOIDCCLIENTRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.CreateOIDCClientResponse)
})
_sym_db.RegisterMessage(CreateOIDCClientResponse)
GetOIDCClientRequest = _reflection.GeneratedProtocolMessageType('GetOIDCClientRequest', (_message.Message,), {
'DESCRIPTOR' : _GETOIDCCLIENTREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.GetOIDCClientRequest)
})
_sym_db.RegisterMessage(GetOIDCClientRequest)
GetOIDCClientResponse = _reflection.GeneratedProtocolMessageType('GetOIDCClientResponse', (_message.Message,), {
'DESCRIPTOR' : _GETOIDCCLIENTRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.GetOIDCClientResponse)
})
_sym_db.RegisterMessage(GetOIDCClientResponse)
ListOIDCClientsRequest = _reflection.GeneratedProtocolMessageType('ListOIDCClientsRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTOIDCCLIENTSREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.ListOIDCClientsRequest)
})
_sym_db.RegisterMessage(ListOIDCClientsRequest)
ListOIDCClientsResponse = _reflection.GeneratedProtocolMessageType('ListOIDCClientsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTOIDCCLIENTSRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.ListOIDCClientsResponse)
})
_sym_db.RegisterMessage(ListOIDCClientsResponse)
UpdateOIDCClientRequest = _reflection.GeneratedProtocolMessageType('UpdateOIDCClientRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEOIDCCLIENTREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.UpdateOIDCClientRequest)
})
_sym_db.RegisterMessage(UpdateOIDCClientRequest)
UpdateOIDCClientResponse = _reflection.GeneratedProtocolMessageType('UpdateOIDCClientResponse', (_message.Message,), {
'DESCRIPTOR' : _UPDATEOIDCCLIENTRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.UpdateOIDCClientResponse)
})
_sym_db.RegisterMessage(UpdateOIDCClientResponse)
DeleteOIDCClientRequest = _reflection.GeneratedProtocolMessageType('DeleteOIDCClientRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEOIDCCLIENTREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.DeleteOIDCClientRequest)
})
_sym_db.RegisterMessage(DeleteOIDCClientRequest)
DeleteOIDCClientResponse = _reflection.GeneratedProtocolMessageType('DeleteOIDCClientResponse', (_message.Message,), {
'DESCRIPTOR' : _DELETEOIDCCLIENTRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.DeleteOIDCClientResponse)
})
_sym_db.RegisterMessage(DeleteOIDCClientResponse)
DeleteAllRequest = _reflection.GeneratedProtocolMessageType('DeleteAllRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEALLREQUEST,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.DeleteAllRequest)
})
_sym_db.RegisterMessage(DeleteAllRequest)
DeleteAllResponse = _reflection.GeneratedProtocolMessageType('DeleteAllResponse', (_message.Message,), {
'DESCRIPTOR' : _DELETEALLRESPONSE,
'__module__' : 'python_pachyderm.proto.v2.identity.identity_pb2'
# @@protoc_insertion_point(class_scope:identity_v2.DeleteAllResponse)
})
_sym_db.RegisterMessage(DeleteAllResponse)
DESCRIPTOR._options = None
_API = _descriptor.ServiceDescriptor(
name='API',
full_name='identity_v2.API',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1719,
serialized_end=3038,
methods=[
_descriptor.MethodDescriptor(
name='SetIdentityServerConfig',
full_name='identity_v2.API.SetIdentityServerConfig',
index=0,
containing_service=None,
input_type=_SETIDENTITYSERVERCONFIGREQUEST,
output_type=_SETIDENTITYSERVERCONFIGRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetIdentityServerConfig',
full_name='identity_v2.API.GetIdentityServerConfig',
index=1,
containing_service=None,
input_type=_GETIDENTITYSERVERCONFIGREQUEST,
output_type=_GETIDENTITYSERVERCONFIGRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateIDPConnector',
full_name='identity_v2.API.CreateIDPConnector',
index=2,
containing_service=None,
input_type=_CREATEIDPCONNECTORREQUEST,
output_type=_CREATEIDPCONNECTORRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='UpdateIDPConnector',
full_name='identity_v2.API.UpdateIDPConnector',
index=3,
containing_service=None,
input_type=_UPDATEIDPCONNECTORREQUEST,
output_type=_UPDATEIDPCONNECTORRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListIDPConnectors',
full_name='identity_v2.API.ListIDPConnectors',
index=4,
containing_service=None,
input_type=_LISTIDPCONNECTORSREQUEST,
output_type=_LISTIDPCONNECTORSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetIDPConnector',
full_name='identity_v2.API.GetIDPConnector',
index=5,
containing_service=None,
input_type=_GETIDPCONNECTORREQUEST,
output_type=_GETIDPCONNECTORRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteIDPConnector',
full_name='identity_v2.API.DeleteIDPConnector',
index=6,
containing_service=None,
input_type=_DELETEIDPCONNECTORREQUEST,
output_type=_DELETEIDPCONNECTORRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateOIDCClient',
full_name='identity_v2.API.CreateOIDCClient',
index=7,
containing_service=None,
input_type=_CREATEOIDCCLIENTREQUEST,
output_type=_CREATEOIDCCLIENTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='UpdateOIDCClient',
full_name='identity_v2.API.UpdateOIDCClient',
index=8,
containing_service=None,
input_type=_UPDATEOIDCCLIENTREQUEST,
output_type=_UPDATEOIDCCLIENTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetOIDCClient',
full_name='identity_v2.API.GetOIDCClient',
index=9,
containing_service=None,
input_type=_GETOIDCCLIENTREQUEST,
output_type=_GETOIDCCLIENTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListOIDCClients',
full_name='identity_v2.API.ListOIDCClients',
index=10,
containing_service=None,
input_type=_LISTOIDCCLIENTSREQUEST,
output_type=_LISTOIDCCLIENTSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteOIDCClient',
full_name='identity_v2.API.DeleteOIDCClient',
index=11,
containing_service=None,
input_type=_DELETEOIDCCLIENTREQUEST,
output_type=_DELETEOIDCCLIENTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteAll',
full_name='identity_v2.API.DeleteAll',
index=12,
containing_service=None,
input_type=_DELETEALLREQUEST,
output_type=_DELETEALLRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_API)
DESCRIPTOR.services_by_name['API'] = _API
# @@protoc_insertion_point(module_scope)
| mit |
aam-at/tensorflow | tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_ops_test.py | 2 | 68344 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CSR sparse matrix tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import sparse
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
def dense_to_csr_sparse_matrix(dense):
dense_t = ops.convert_to_tensor(dense)
locs = array_ops.stop_gradient(array_ops.where(math_ops.abs(dense_t) > 0))
return sparse_csr_matrix_ops.dense_to_csr_sparse_matrix(dense_t, locs)
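# Helper: swap two elements of a list in place.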
def _swap(a, i, j):
a[i], a[j] = a[j], a[i]
def twist_matrix(matrix, permutation_indices):
"""Permute the rows and columns of a 2D or (batched) 3D Tensor."""
# Shuffle the rows and columns with the same permutation.
if matrix.shape.ndims == 2:
# Invert the permutation since `tf.gather` and `tf.gather_nd` need the
# mapping from each index `i` to the index that maps to `i`.
permutation_indices_inv = array_ops.invert_permutation(permutation_indices)
matrix = array_ops.gather(matrix, permutation_indices_inv, axis=0)
matrix = array_ops.gather(matrix, permutation_indices_inv, axis=1)
elif matrix.shape.ndims == 3:
permutation_indices_inv = map_fn.map_fn(array_ops.invert_permutation,
permutation_indices)
# For 3D Tensors, it's easy to shuffle the rows but not the columns. We
# permute the rows, transpose, permute the rows again, and transpose back.
batch_size = matrix.shape[0]
batch_indices = array_ops.broadcast_to(
math_ops.range(batch_size)[:, None], permutation_indices.shape)
for _ in range(2):
matrix = array_ops.gather_nd(
matrix,
array_ops.stack([batch_indices, permutation_indices_inv], axis=-1))
# Transpose the matrix, or equivalently, swap dimensions 1 and 2.
matrix = array_ops.transpose(matrix, perm=[0, 2, 1])
else:
raise ValueError("Input matrix must have rank 2 or 3. Got: {}".format(
matrix.shape.ndims))
return matrix
class CSRSparseMatrixOpsTest(test.TestCase):
@classmethod
def setUpClass(cls): # pylint: disable=g-missing-super-call
cls._gpu_available = test_util.is_gpu_available()
# TODO(ebrevdo): This will work once we find a way to get rendezvous
# working for CSRSparseMatrix and can remove the HostMemory
# annotations for the other ops.
@test_util.run_in_graph_and_eager_modes
def DISABLEDtestFromProto(self):
if not self._gpu_available:
return
a_indices = np.array([[0, 0], [2, 3]])
a_values = np.asarray([1.0, 5.0], dtype=np.float32)
a_dense_shape = np.asarray([5, 6], dtype=np.int64)
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_csr_mat = a_sparse_mat.tocsr()
a_col_inds = a_csr_mat.indices
a_row_ptrs = a_csr_mat.indptr
# Format of SparseMatrix:
# type_name == "tensorflow::CSRSparseMatrix"
# metadata == b (validated)
# tensors == [dense_shape, row_ptrs, col_indices, values]
dense_shape_proto = tensor_util.make_tensor_proto(a_dense_shape)
row_ptrs_proto = tensor_util.make_tensor_proto(a_row_ptrs)
col_inds_proto = tensor_util.make_tensor_proto(a_col_inds)
values_proto = tensor_util.make_tensor_proto(a_values)
variant_tensor_data = tensor_pb2.VariantTensorDataProto(
type_name="tensorflow::CSRSparseMatrix",
metadata=np.asarray(True).tobytes(),
tensors=[
dense_shape_proto, row_ptrs_proto, col_inds_proto, values_proto
])
tensor_proto = tensor_pb2.TensorProto(
dtype=dtypes.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto())
tensor_proto.variant_val.extend([variant_tensor_data])
a_sm = constant_op.constant(tensor_proto)
a_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
a_sm, type=dtypes.float32)
self.evaluate(a_rt)
@test_util.run_in_graph_and_eager_modes
def testSparseTensorConversion(self):
a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])
a_values = [1.0, 5.0, -1.0, -2.0]
a_dense_shape = [5, 6]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_csr_mat = a_sparse_mat.tocsr()
# Convert 2D SparseTensor to CSR Matrix
a_st = sparse_tensor.SparseTensor(a_indices, a_values, a_dense_shape)
a_st = math_ops.cast(a_st, dtypes.float32)
a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
a_st.indices, a_st.values, a_st.dense_shape)
# Get row indices and columns for batch 0.
a_sm_row_ptrs, a_sm_col_inds, a_sm_values = (
sparse_csr_matrix_ops.csr_sparse_matrix_components(
a_sm, 0, type=a_st.dtype))
a_sm_row_ptrs_values, a_sm_col_inds_values, a_sm_values_values = (
self.evaluate((a_sm_row_ptrs, a_sm_col_inds, a_sm_values)))
self.assertAllEqual(a_csr_mat.indices, a_sm_col_inds_values)
self.assertAllEqual(a_csr_mat.indptr, a_sm_row_ptrs_values)
self.assertAllClose(a_values, a_sm_values_values)
# Convert CSR Matrix to 2D SparseTensor
a_st_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
a_sm, type=a_st.dtype)
a_st_rt_value = self.evaluate(a_st_rt)
self.assertAllEqual(a_indices, a_st_rt_value.indices)
self.assertAllClose(a_values, a_st_rt_value.values)
self.assertAllEqual(a_dense_shape, a_st_rt_value.dense_shape)
# TODO(b/139491352): Add handle_data propagation to array_ops.identity.
@test_util.run_deprecated_v1
def testCSRSparseMatrixResourceVariable(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
a_mats = sparsify(np.random.randn(*dense_shape)).astype(np.float32)
a_sm = dense_to_csr_sparse_matrix(a_mats)
with ops.device("/gpu:0"):
v = variable_scope.get_variable("sm", initializer=a_sm, use_resource=True)
v_id = array_ops.identity(v)
self.assertEqual(
sparse_csr_matrix_ops.dense_shape_and_type(v_id).shape, a_mats.shape)
a_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
v, type=dtypes.float32)
v_reassign = state_ops.assign(v, v_id).op
with self.assertRaisesOpError("Error while reading resource variable sm"):
self.evaluate(a_rt)
self.evaluate(v.initializer)
a_rt_value = self.evaluate(a_rt)
self.assertAllClose(a_mats, a_rt_value)
self.evaluate(v_reassign)
a_rt_reassigned_value = self.evaluate(a_rt)
self.assertAllClose(a_mats, a_rt_reassigned_value)
@test_util.run_in_graph_and_eager_modes
def testBatchSparseTensorConversion(self):
a_indices = np.array([[0, 0, 0], [0, 2, 3], [2, 0, 1]])
a_values = [1.0, 5.0, 6.0]
a_dense_shape = [3, 5, 6]
a_sparse_mats = [
sparse.coo_matrix(([1.0, 5.0], ([0, 2], [0, 3])),
shape=a_dense_shape[1:]),
sparse.coo_matrix(([], ([], [])), shape=a_dense_shape[1:]),
sparse.coo_matrix(([6.0], ([0], [1])), shape=a_dense_shape[1:])
]
a_csr_mats = [m.tocsr() for m in a_sparse_mats]
# Convert 3D SparseTensor to CSR Matrix
a_st = sparse_tensor.SparseTensor(a_indices, a_values, a_dense_shape)
a_st = math_ops.cast(a_st, dtypes.float32)
a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
a_st.indices, a_st.values, a_st.dense_shape)
# Get row indices and columns for batches.
a_sm_components = [
sparse_csr_matrix_ops.csr_sparse_matrix_components(
a_sm, i, type=a_st.dtype) for i in range(3)
]
a_sm_values = self.evaluate(a_sm_components)
for i, (a_sm_val, a_csr_mat) in enumerate(zip(a_sm_values, a_csr_mats)):
tf_logging.info("Comparing batch %d" % i)
self.assertAllEqual(a_csr_mat.indptr, a_sm_val.row_ptrs)
self.assertAllEqual(a_csr_mat.indices, a_sm_val.col_inds)
self.assertAllClose(a_csr_mat.data, a_sm_val.values)
# Convert CSR batched Matrix to 3D SparseTensor
a_st_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
a_sm, type=a_st.dtype)
a_st_rt_value = self.evaluate(a_st_rt)
self.assertAllEqual(a_indices, a_st_rt_value.indices)
self.assertAllClose(a_values, a_st_rt_value.values)
self.assertAllEqual(a_dense_shape, a_st_rt_value.dense_shape)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseTensorConversion(self):
# Test two sets of conversions to check behavior of the ops in a
# concurrent environment (parallel executions of the ST -> SM ops).
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
mats = [
sparsify(np.random.randn(*dense_shape)).astype(np.float32)
for _ in range(2)
]
csr_mats = [list(map(sparse.csr_matrix, mat)) for mat in mats]
mats_t = [ops.convert_to_tensor(mat) for mat in mats]
mats_locs = [array_ops.where(mat_t > 0) for mat_t in mats_t]
sparse_tensors = list()
for mat_t, mat_loc in zip(mats_t, mats_locs):
sparse_tensors.append(
sparse_tensor.SparseTensor(mat_loc,
array_ops.gather_nd(mat_t,
mat_loc), dense_shape))
sparse_matrices = [
sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
st.indices, st.values, st.dense_shape) for st in sparse_tensors
]
sm_nnz = [
sparse_csr_matrix_ops.sparse_matrix_nnz(sm) for sm in sparse_matrices
]
# Get row indices and columns for batches.
sm_components = list()
for sm in sparse_matrices:
sm_components.append([
sparse_csr_matrix_ops.csr_sparse_matrix_components(
sm, i, type=dtypes.float32) for i in range(dense_shape[0])
])
sm_nnz_values, sm_values = self.evaluate((sm_nnz, sm_components))
for i, (sm_values_i, csr_mats_i) in enumerate(zip(sm_values, csr_mats)):
for b, (sm_val, csr_mat) in enumerate(zip(sm_values_i, csr_mats_i)):
tf_logging.info("Comparing matrix %d batch %d" % (i, b))
self.assertEqual(csr_mat.nnz, sm_nnz_values[i][b])
self.assertAllEqual(csr_mat.indptr, sm_val.row_ptrs)
self.assertAllEqual(csr_mat.indices, sm_val.col_inds)
self.assertAllClose(csr_mat.data, sm_val.values)
# Convert CSR batched Matrix to 3D SparseTensor
st_rt = [
sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
sm, type=dtypes.float32) for sm in sparse_matrices
]
st_values, st_rt_values = self.evaluate((sparse_tensors, st_rt))
for (st_value, st_rt_value) in zip(st_values, st_rt_values):
self.assertAllEqual(st_value.indices, st_rt_value.indices)
self.assertAllClose(st_value.values, st_rt_value.values)
self.assertAllEqual(dense_shape, st_rt_value.dense_shape)
@test_util.run_in_graph_and_eager_modes
def testDenseConversion(self):
a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])
a_values = np.array([1.0, 5.0, -1.0, -2.0]).astype(np.float32)
a_dense_shape = [5, 6]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_csr_mat = a_sparse_mat.tocsr()
a_dense = a_sparse_mat.todense()
    # Convert 2D dense matrix to CSR Matrix
a_sm = dense_to_csr_sparse_matrix(a_dense)
# Get row indices and columns for batch 0.
a_sm_row_ptrs, a_sm_col_inds, a_sm_values = (
sparse_csr_matrix_ops.csr_sparse_matrix_components(
a_sm, 0, type=dtypes.float32))
a_sm_row_ptrs_values, a_sm_col_inds_values, a_sm_values_values = (
self.evaluate((a_sm_row_ptrs, a_sm_col_inds, a_sm_values)))
self.assertAllEqual(a_csr_mat.indices, a_sm_col_inds_values)
self.assertAllEqual(a_csr_mat.indptr, a_sm_row_ptrs_values)
self.assertAllClose(a_values, a_sm_values_values)
# Convert CSR Matrix to 2D dense matrix
a_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
a_sm, dtypes.float32)
a_rt_value = self.evaluate(a_rt)
self.assertAllEqual(a_dense, a_rt_value)
@test_util.run_in_graph_and_eager_modes
def testBatchDenseConversion(self):
a_dense_shape = [4, 5, 6]
a_sparse_mats = [
sparse.coo_matrix(([1.0, 5.0], ([0, 2], [0, 3])),
shape=a_dense_shape[1:]),
sparse.coo_matrix(([], ([], [])), shape=a_dense_shape[1:]),
sparse.coo_matrix(([6.0], ([0], [1])), shape=a_dense_shape[1:]),
sparse.coo_matrix(([], ([], [])), shape=a_dense_shape[1:]),
]
a_csr_mats = [m.tocsr() for m in a_sparse_mats]
a_dense = np.asarray([m.todense() for m in a_sparse_mats], dtype=np.float32)
    # Convert 3D dense tensor to CSR Matrix
a_sm = dense_to_csr_sparse_matrix(a_dense)
# Get row indices and columns for batches.
a_sm_components = [
sparse_csr_matrix_ops.csr_sparse_matrix_components(
a_sm, i, type=dtypes.float32) for i in range(3)
]
a_sm_values = self.evaluate(a_sm_components)
for i, (a_sm_val, a_csr_mat) in enumerate(zip(a_sm_values, a_csr_mats)):
tf_logging.info("Comparing batch %d" % i)
self.assertAllEqual(a_csr_mat.indptr, a_sm_val.row_ptrs)
self.assertAllEqual(a_csr_mat.indices, a_sm_val.col_inds)
self.assertAllClose(a_csr_mat.data, a_sm_val.values)
    # Convert CSR batched Matrix to 3D dense tensor
a_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
a_sm, type=dtypes.float32)
a_rt_value = self.evaluate(a_rt)
self.assertAllEqual(a_dense, a_rt_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchDenseConversion(self):
# Test two sets of conversions to check behavior of the ops in a
# concurrent environment (parallel executions of the ST -> SM
# ops).
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
mats = [
sparsify(np.random.randn(*dense_shape)).astype(np.float32)
for _ in range(2)
]
csr_mats = [[sparse.csr_matrix(m) for m in mat] for mat in mats]
mats_t = [ops.convert_to_tensor(mat) for mat in mats]
mats_locs = [array_ops.where(mat_t > 0) for mat_t in mats_t]
sparse_matrices = [
sparse_csr_matrix_ops.dense_to_csr_sparse_matrix(mat, mat_loc)
for (mat, mat_loc) in zip(mats_t, mats_locs)
]
sm_nnz = [
sparse_csr_matrix_ops.sparse_matrix_nnz(sm) for sm in sparse_matrices
]
# Get row indices and columns for batches.
sm_components = []
for sm in sparse_matrices:
sm_components.append([
sparse_csr_matrix_ops.csr_sparse_matrix_components(
sm, i, type=dtypes.float32) for i in range(dense_shape[0])
])
sm_nnz_values, sm_values = self.evaluate((sm_nnz, sm_components))
for i, (sm_values_i, csr_mats_i) in enumerate(zip(sm_values, csr_mats)):
for b, (sm_val, csr_mat) in enumerate(zip(sm_values_i, csr_mats_i)):
tf_logging.info("Comparing matrix %d batch %d" % (i, b))
self.assertEqual(csr_mat.nnz, sm_nnz_values[i][b])
self.assertAllEqual(csr_mat.indptr, sm_val.row_ptrs)
self.assertAllEqual(csr_mat.indices, sm_val.col_inds)
self.assertAllClose(csr_mat.data, sm_val.values)
# Convert CSR batched Matrix to 3D dense tensor
sm_rt = [
sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
sm, type=dtypes.float32) for sm in sparse_matrices
]
sm_rt_values = self.evaluate(sm_rt)
for (mat, sm_rt_value) in zip(mats, sm_rt_values):
self.assertAllEqual(mat, sm_rt_value)
@test_util.run_in_graph_and_eager_modes
def testSparseMatrixAdd(self):
if not self._gpu_available:
return
if test.is_built_with_rocm():
self.skipTest("sparse-matrix-add op not supported on ROCm")
a_indices = np.array([[0, 0], [2, 3]])
a_values = np.array([1.0, 5.0]).astype(np.float32)
a_dense_shape = [5, 6]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_dense = a_sparse_mat.todense()
b_indices = np.array([[1, 0], [1, 4], [2, 3], [4, 1]])
b_values = np.array([1.0, 0.5, -5.0, 2.0]).astype(np.float32)
b_dense_shape = [5, 6]
b_sparse_mat = sparse.coo_matrix((b_values,
(b_indices[:, 0], b_indices[:, 1])),
shape=b_dense_shape)
b_dense = b_sparse_mat.todense()
for (alpha, beta) in [(1.0, 1.0), (1.0, -1.0), (0.25, 0.5)]:
a_sum_b_sparse_mat = alpha * a_sparse_mat + beta * b_sparse_mat
# Convert 2D SparseTensor to CSR Matrix
a_sm = dense_to_csr_sparse_matrix(a_dense)
b_sm = dense_to_csr_sparse_matrix(b_dense)
alpha = np.float32(alpha)
beta = np.float32(beta)
c_sm = sparse_csr_matrix_ops.sparse_matrix_add(
a_sm, b_sm, alpha=alpha, beta=beta)
c_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, dtypes.float32)
c_dense_value = self.evaluate(c_dense)
self.assertAllClose(a_sum_b_sparse_mat.todense(), c_dense_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixAdd(self):
if not self._gpu_available:
return
if test.is_built_with_rocm():
self.skipTest("sparse-matrix-add op not supported on ROCm")
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
a_mats = sparsify(np.random.randn(*dense_shape)).astype(np.float32)
b_mats = sparsify(np.random.randn(*dense_shape)).astype(np.float32)
for (alpha, beta) in [(1.0, 1.0), (1.0, -1.0), (0.25, 0.5)]:
tf_logging.info("testLargeBatchSparseMatrixAdd, comparing "
"alpha, beta (%d, %d)" % (alpha, beta))
a_sm = dense_to_csr_sparse_matrix(a_mats)
b_sm = dense_to_csr_sparse_matrix(b_mats)
alpha = np.float32(alpha)
beta = np.float32(beta)
c_sm = sparse_csr_matrix_ops.sparse_matrix_add(
a_sm, b_sm, alpha=alpha, beta=beta)
c_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, dtypes.float32)
c_dense_value = self.evaluate(c_dense)
self.assertAllClose(c_dense_value, alpha * a_mats + beta * b_mats)
@test_util.run_in_graph_and_eager_modes
def testSparseMatrixMatMul(self):
for shapes in [[(5, 6), (6, 1)], [(5, 6), (6, 2)]]:
a_indices = np.array([[0, 0], [2, 3]])
a_values = np.array([1.0, 5.0]).astype(np.float32)
a_dense_shape = shapes[0]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_dense = a_sparse_mat.todense()
# Will multiply sparse a (shape=shapes[0]) by dense b (shape=shapes[1]).
b = np.random.randn(*shapes[1]).astype(np.float32)
a_sm = dense_to_csr_sparse_matrix(a_dense)
c = sparse_csr_matrix_ops.sparse_matrix_mat_mul(a=a_sm, b=b)
c_value = self.evaluate(c)
expected_c_value = a_sparse_mat.dot(b)
self.assertAllClose(expected_c_value, c_value)
@test_util.run_in_graph_and_eager_modes
def testSparseMatrixMatMulConjugateOutput(self):
for shapes in [[(5, 6), (6, 1)], [(5, 6), (6, 2)]]:
a_indices = np.array([[0, 0], [2, 3]])
a_values = np.array([1.0 + 1.j, 5.0 - 2.j]).astype(np.complex64)
a_dense_shape = shapes[0]
a_sparse_mat = sparse.coo_matrix(
(a_values, (a_indices[:, 0], a_indices[:, 1])), shape=a_dense_shape)
a_dense = a_sparse_mat.todense()
# Will multiply sparse a (shape=shapes[0]) by dense b (shape=shapes[1]).
b = np.random.randn(*shapes[1]).astype(np.complex64)
a_sm = dense_to_csr_sparse_matrix(a_dense)
c = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
a=a_sm, b=b, conjugate_output=True)
c_value = self.evaluate(c)
expected_c_value = self.evaluate(
math_ops.conj(test_util.matmul_without_tf32(a_dense, b)))
self.assertAllClose(expected_c_value, c_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixMatMul(self):
dtypes_to_test = [np.float32, np.complex64]
sparsify = lambda m: m * (m > 0)
for dtype in dtypes_to_test:
for (transpose_a, transpose_b) in ((False, False), (False, True),
(True, False), (True, True)):
for (adjoint_a, adjoint_b) in ((False, False), (False, True),
(True, False), (True, True)):
if (transpose_a and adjoint_a) or (transpose_b and adjoint_b):
continue
for shapes in [[[53, 127, 65], [53, 65, 1]],
[[53, 127, 1], [53, 1, 65]],
[[53, 127, 65], [53, 65, 127]]]:
a_dense_shape = shapes[0]
b_dense_shape = shapes[1]
if transpose_a or adjoint_a:
_swap(a_dense_shape, -2, -1)
if transpose_b or adjoint_b:
_swap(b_dense_shape, -2, -1)
a_mats = sparsify(
(np.random.randn(*a_dense_shape) +
1.j * np.random.randn(*a_dense_shape))).astype(dtype)
b_mats = (np.random.randn(*b_dense_shape) +
1.j * np.random.randn(*b_dense_shape)).astype(dtype)
tf_logging.info(
"testLargeBatchSparseMatrixMatMul transpose_a %s transpose_b "
"%s adjoint_a %s adjoint_b %s" %
(transpose_a, transpose_b, adjoint_a, adjoint_b))
a_sm = dense_to_csr_sparse_matrix(a_mats)
c_t = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
a_sm,
b_mats,
transpose_output=False,
conjugate_output=False,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
c_dense_t = test_util.matmul_without_tf32(
a_mats,
b_mats,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
self.assertAllEqual(c_dense_t.shape, c_t.shape)
c_t_value, c_dense_t_value = self.evaluate((c_t, c_dense_t))
self.assertAllClose(
c_t_value, c_dense_t_value, rtol=1e-6, atol=2e-5)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixMatMulTransposed(self):
dtypes_to_test = [np.float32]
if not test.is_built_with_rocm():
      # complex types are not supported on the ROCm platform
dtypes_to_test += [np.complex64]
if test.is_built_with_rocm():
# TODO(rocm): fix this
# This test is currently failing on the ROCm platform
      # Re-enable it once the fix is available
self.skipTest("hipSPARSE all failure on the ROCm platform")
sparsify = lambda m: m * (m > 0)
for dtype in dtypes_to_test:
for (transpose_a, transpose_b) in ((False, False), (False, True),
(True, False), (True, True)):
for (adjoint_a, adjoint_b) in ((False, False), (False, True),
(True, False), (True, True)):
if (transpose_a and adjoint_a) or (transpose_b and adjoint_b):
continue
for shapes in [[[53, 127, 65], [53, 65, 1]],
[[53, 127, 1], [53, 1, 65]],
[[53, 127, 65], [53, 65, 127]]]:
a_dense_shape = shapes[0]
b_dense_shape = shapes[1]
if transpose_a or adjoint_a:
_swap(a_dense_shape, -2, -1)
if transpose_b or adjoint_b:
_swap(b_dense_shape, -2, -1)
a_mats = sparsify(
(np.random.randn(*a_dense_shape) +
1.j * np.random.randn(*a_dense_shape))).astype(dtype)
b_mats = (np.random.randn(*b_dense_shape) +
1.j * np.random.randn(*b_dense_shape)).astype(dtype)
tf_logging.info(
"testLargeBatchSparseMatrixMatMul transpose_a %s transpose_b "
"%s adjoint_a %s adjoint_b %s" %
(transpose_a, transpose_b, adjoint_a, adjoint_b))
a_sm = dense_to_csr_sparse_matrix(a_mats)
c_t = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
a_sm,
b_mats,
transpose_output=True,
conjugate_output=False,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
# Example: t(adj(a) . b) = t(b) . conj(a)
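            # With transpose_output=True the op returns the transpose of the
            # product, so the dense reference swaps the operands, conjugates
            # any adjointed operand, and flips the transpose flags.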
c_dense_t = test_util.matmul_without_tf32(
math_ops.conj(b_mats) if adjoint_b else b_mats,
math_ops.conj(a_mats) if adjoint_a else a_mats,
transpose_a=not (transpose_b or adjoint_b),
transpose_b=not (transpose_a or adjoint_a),
adjoint_a=False,
adjoint_b=False)
self.assertAllEqual(c_t.shape, c_dense_t.shape)
c_t_value, c_dense_t_value = self.evaluate((c_t, c_dense_t))
self.assertAllClose(
c_t_value, c_dense_t_value, rtol=1e-6, atol=2e-5)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixMatMulConjugate(self):
if test.is_built_with_rocm():
# complex types are not yet supported on the ROCm platform
self.skipTest("complex type not supported on ROCm")
sparsify = lambda m: m * (m > 0)
a_dense_shape = [53, 65, 127]
b_dense_shape = [53, 127, 67]
a_mats = sparsify(
(np.random.randn(*a_dense_shape) +
1.j * np.random.randn(*a_dense_shape))).astype(np.complex64)
b_mats = (np.random.randn(*b_dense_shape) +
1.j * np.random.randn(*b_dense_shape)).astype(np.complex64)
a_sm = dense_to_csr_sparse_matrix(a_mats)
c_t = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
a_sm, b_mats, conjugate_output=True)
c_dense_t = math_ops.conj(test_util.matmul_without_tf32(a_mats, b_mats))
self.assertAllEqual(c_t.shape, c_dense_t.shape)
c_t_value, c_dense_t_value = self.evaluate((c_t, c_dense_t))
self.assertAllClose(c_t_value, c_dense_t_value, atol=1e-5, rtol=1e-5)
@test_util.run_in_graph_and_eager_modes
def testSparseMatrixSparseMatMul(self):
a_indices = np.array([[0, 0], [2, 3]])
a_values = np.array([1.0, 5.0]).astype(np.float32)
a_dense_shape = [5, 6]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_dense = a_sparse_mat.todense()
b_indices = np.array([[0, 0], [3, 0], [3, 1]])
b_values = np.array([2.0, 7.0, 8.0]).astype(np.float32)
b_dense_shape = [6, 7]
b_sparse_mat = sparse.coo_matrix((b_values,
(b_indices[:, 0], b_indices[:, 1])),
shape=b_dense_shape)
b_dense = b_sparse_mat.todense()
a_sm = dense_to_csr_sparse_matrix(a_dense)
b_sm = dense_to_csr_sparse_matrix(b_dense)
c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
a=a_sm, b=b_sm, type=dtypes.float32)
c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, dtypes.float32)
c_sm_dense_value = self.evaluate(c_sm_dense)
expected_c_value = a_sparse_mat.dot(b_sparse_mat).todense()
self.assertAllClose(expected_c_value, c_sm_dense_value)
@test_util.run_in_graph_and_eager_modes
def testSparseMatrixSparseMatMul_NumericZerosNotPruned(self):
# Tests that numeric zeros appearing from the sparse-sparse matrix
    # multiplication are not pruned from the sparse structural representation.
a_indices = np.array([[0, 0], [0, 2]])
a_values = np.array([2.0, -1.0]).astype(np.float32)
a_dense_shape = [2, 3]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_dense = a_sparse_mat.todense()
b_indices = np.array([[0, 1], [2, 1]])
b_values = np.array([3.0, 6.0]).astype(np.float32)
b_dense_shape = [3, 2]
b_sparse_mat = sparse.coo_matrix((b_values,
(b_indices[:, 0], b_indices[:, 1])),
shape=b_dense_shape)
b_dense = b_sparse_mat.todense()
# Convert to CSRSparseMatrix while removing numeric zeros from the
# structural representation.
a_sm = dense_to_csr_sparse_matrix(a_dense)
b_sm = dense_to_csr_sparse_matrix(b_dense)
# Compute the matmul.
c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
a=a_sm, b=b_sm, type=dtypes.float32)
c_nnz = sparse_csr_matrix_ops.sparse_matrix_nnz(c_sm)
c_nnz_value = self.evaluate(c_nnz)
# Expect that there is a single numeric zero at index (0, 1) if zeros are
# not pruned, since 2.0 * 3.0 + (-1.0) * 6.0 = 0.0.
self.assertAllClose(1, c_nnz_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixSparseMatMul(self):
sparsify = lambda m: m * (m > 0)
for (transpose_a, transpose_b) in ((False, False), (False, True),
(True, False), (True, True)):
for (adjoint_a, adjoint_b) in ((False, False), (False, True),
(True, False), (True, True)):
if (transpose_a and adjoint_a) or (transpose_b and adjoint_b):
continue
a_dense_shape = ([53, 127, 65]
if transpose_a or adjoint_a else [53, 65, 127])
b_dense_shape = ([53, 67, 127]
if transpose_b or adjoint_b else [53, 127, 67])
a_mats = sparsify(np.random.randn(*a_dense_shape)).astype(np.float32)
b_mats = sparsify(np.random.randn(*b_dense_shape).astype(np.float32))
a_sm = dense_to_csr_sparse_matrix(a_mats)
b_sm = dense_to_csr_sparse_matrix(b_mats)
c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
a_sm,
b_sm,
type=dtypes.float32,
transpose_a=transpose_a,
adjoint_a=adjoint_a,
transpose_b=transpose_b,
adjoint_b=adjoint_b)
c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, dtypes.float32)
c_dense_t = test_util.matmul_without_tf32(
a_mats,
b_mats,
transpose_a=transpose_a,
adjoint_a=adjoint_a,
transpose_b=transpose_b,
adjoint_b=adjoint_b)
c_dense_t_value, c_sm_dense_value = self.evaluate(
(c_dense_t, c_sm_dense))
self.assertAllClose(c_sm_dense_value, c_dense_t_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchRegisteredAddN(self):
if not self._gpu_available:
return
if test.is_built_with_rocm():
# sparse-matrix-add op is not yet supported on the ROCm platform
self.skipTest("sparse-matrix-add op not supported on ROCm")
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
matrices = [
sparsify(np.random.randn(*dense_shape)).astype(np.float32)
for _ in range(16)
]
sparse_matrices = [dense_to_csr_sparse_matrix(mat) for mat in matrices]
sparse_matrices_sum = math_ops.add_n(sparse_matrices)
sparse_matrices_sum_dense = \
sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
sparse_matrices_sum, dtypes.float32)
sparse_matrices_sum_dense_value = self.evaluate(sparse_matrices_sum_dense)
# Ensure that the dense (numpy) sum across all batches matches the result
# of add_n converted back to dense.
expected_sum = np.sum(matrices, axis=0)
self.assertAllClose(expected_sum, sparse_matrices_sum_dense_value)
@test_util.run_in_graph_and_eager_modes
def testCSRZeros(self):
if not self._gpu_available:
return
a_dense_shape = [65, 127]
b_dense_shape = [53, 127, 67]
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
# Check both rank-2 and rank-3 tensors.
a_sm = sparse_csr_matrix_ops.sparse_matrix_zeros(
a_dense_shape, type=dtype)
b_sm = sparse_csr_matrix_ops.sparse_matrix_zeros(
b_dense_shape, type=dtype)
a_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(a_sm, type=dtype)
b_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(b_sm, type=dtype)
a_rt_value, b_rt_value = self.evaluate((a_rt, b_rt))
self.assertAllEqual(a_rt_value, np.zeros(a_dense_shape))
self.assertAllEqual(b_rt_value, np.zeros(b_dense_shape))
@test_util.run_in_graph_and_eager_modes
def testLargeBatchZerosLike(self):
if not self._gpu_available:
return
batch_size = 53
rows = 128
cols = 67
dense_shape = [batch_size, rows, cols]
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
sparse_matrices = sparse_csr_matrix_ops.sparse_matrix_zeros(
dense_shape, type=dtype)
zeros_like_sparse_matrices = array_ops.zeros_like(sparse_matrices)
zeros_like_components = [
sparse_csr_matrix_ops.csr_sparse_matrix_components(
zeros_like_sparse_matrices, i, type=dtype)
for i in range(batch_size)
]
zeros_like_components_values = self.evaluate(zeros_like_components)
for component in zeros_like_components_values:
self.assertAllEqual(component.row_ptrs, np.zeros(rows + 1, np.int32))
self.assertAllEqual(component.col_inds, np.empty([0], np.int32))
self.assertAllEqual(component.values, np.empty([0],
dtype.as_numpy_dtype))
@test_util.run_in_graph_and_eager_modes
def testTranspose(self):
sparsify = lambda m: m * (m > 0)
dense_shape = [127, 65]
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
mats = sparsify(
(np.random.randn(*dense_shape) +
1.j * np.random.randn(*dense_shape))).astype(dtype.as_numpy_dtype)
for conjugate in False, True:
expected = np.transpose(mats)
if conjugate:
expected = np.conj(expected)
matrices = math_ops.cast(mats, dtype)
sparse_matrices = dense_to_csr_sparse_matrix(matrices)
transpose_sparse_matrices = \
sparse_csr_matrix_ops.sparse_matrix_transpose(
sparse_matrices, conjugate=conjugate, type=dtype)
dense_transposed = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
transpose_sparse_matrices, dtype)
dense_transposed_values = self.evaluate(dense_transposed)
self.assertAllClose(expected, dense_transposed_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchTranspose(self):
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
mats = sparsify(
(np.random.randn(*dense_shape) +
1.j * np.random.randn(*dense_shape))).astype(dtype.as_numpy_dtype)
expected = np.transpose(mats, (0, 2, 1))
for conjugate in False, True:
if conjugate:
expected = np.conj(expected)
matrices = math_ops.cast(mats, dtype)
sparse_matrices = dense_to_csr_sparse_matrix(matrices)
transpose_sparse_matrices = \
sparse_csr_matrix_ops.sparse_matrix_transpose(
sparse_matrices, conjugate=conjugate, type=dtype)
dense_transposed = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
transpose_sparse_matrices, dtype)
dense_transposed_values = self.evaluate(dense_transposed)
self.assertAllClose(expected, dense_transposed_values)
@test_util.run_in_graph_and_eager_modes
def testSoftmax(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [127, 65]
logits = sparsify(np.random.randn(*dense_shape))
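    # The dense reference replaces structural zeros with -inf so that those
    # positions receive zero probability, matching the sparse softmax which
    # only operates on stored entries.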
logits_with_ninf = np.copy(logits)
logits_with_ninf[logits == 0] = -np.inf
data_types = [dtypes.float32, dtypes.float64]
for dtype in data_types:
logits_t = math_ops.cast(logits, dtype)
logits_t_with_ninf = math_ops.cast(logits_with_ninf, dtype)
expected = nn_ops.softmax(logits_t_with_ninf)
sparse_logits_t = dense_to_csr_sparse_matrix(logits_t)
softmax_sparse_logits_t = sparse_csr_matrix_ops.sparse_matrix_softmax(
sparse_logits_t, type=dtype)
dense_softmax = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
softmax_sparse_logits_t, dtype)
dense_softmax_values, expected_values = self.evaluate(
(dense_softmax, expected))
self.assertAllClose(expected_values, dense_softmax_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSoftmax(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
logits = sparsify(np.random.randn(*dense_shape))
logits_with_ninf = np.copy(logits)
logits_with_ninf[logits == 0] = -np.inf
data_types = [dtypes.float32, dtypes.float64]
for dtype in data_types:
logits_t = math_ops.cast(logits, dtype)
logits_t_with_ninf = math_ops.cast(logits_with_ninf, dtype)
expected = nn_ops.softmax(logits_t_with_ninf)
sparse_logits_t = dense_to_csr_sparse_matrix(logits_t)
softmax_sparse_logits_t = sparse_csr_matrix_ops.sparse_matrix_softmax(
sparse_logits_t, type=dtype)
dense_softmax = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
softmax_sparse_logits_t, dtype)
dense_softmax_values, expected_values = self.evaluate(
(dense_softmax, expected))
self.assertAllClose(expected_values, dense_softmax_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSoftmaxEmpty(self):
if not self._gpu_available:
return
dense_shape = [53, 65, 127]
sparse_logits_t = sparse_csr_matrix_ops.sparse_matrix_zeros(
dense_shape, type=dtypes.float32)
softmax_sparse_logits_t = sparse_csr_matrix_ops.sparse_matrix_softmax(
sparse_logits_t, type=dtypes.float32)
dense_softmax = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
softmax_sparse_logits_t, dtypes.float32)
dense_softmax_values = self.evaluate(dense_softmax)
self.assertAllEqual(
np.zeros_like(dense_softmax_values), dense_softmax_values)
@test_util.run_in_graph_and_eager_modes
def testSoftmaxGrad(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [127, 65]
softmax = sparsify(np.random.randn(*dense_shape))
grad_softmax = sparsify(np.random.randn(*dense_shape))
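    # Dense reference for the softmax gradient:
    #   dL/dlogits = (g - sum(g * s, axis=-1, keepdims=True)) * s
    # where s is the softmax output and g the incoming gradient.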
expected = (
(grad_softmax - np.sum(grad_softmax * softmax, -1, keepdims=True)) *
softmax)
data_types = [dtypes.float32, dtypes.float64]
for dtype in data_types:
softmax_t = math_ops.cast(softmax, dtype)
grad_softmax_t = math_ops.cast(grad_softmax, dtype)
softmax_sparse = dense_to_csr_sparse_matrix(softmax_t)
grad_softmax_sparse = dense_to_csr_sparse_matrix(grad_softmax_t)
gradients_sparse = sparse_csr_matrix_ops.sparse_matrix_softmax_grad(
softmax_sparse, grad_softmax_sparse, dtype)
dense_gradients = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
gradients_sparse, dtype)
      dense_gradients_values = self.evaluate(dense_gradients)
self.assertAllClose(expected, dense_gradients_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSoftmaxGrad(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
softmax = sparsify(np.random.randn(*dense_shape))
grad_softmax = sparsify(np.random.randn(*dense_shape))
expected = (
(grad_softmax - np.sum(grad_softmax * softmax, -1, keepdims=True)) *
softmax)
data_types = [dtypes.float32, dtypes.float64]
for dtype in data_types:
softmax_t = math_ops.cast(softmax, dtype)
grad_softmax_t = math_ops.cast(grad_softmax, dtype)
softmax_sparse = dense_to_csr_sparse_matrix(softmax_t)
grad_softmax_sparse = dense_to_csr_sparse_matrix(grad_softmax_t)
gradients_sparse = sparse_csr_matrix_ops.sparse_matrix_softmax_grad(
softmax_sparse, grad_softmax_sparse, dtype)
dense_gradients = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
gradients_sparse, dtype)
      dense_gradients_values = self.evaluate(dense_gradients)
self.assertAllClose(expected, dense_gradients_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSoftmaxGradEmpty(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
not_empty = sparsify(np.random.randn(*dense_shape)).astype(np.float32)
sparse_empty = sparse_csr_matrix_ops.sparse_matrix_zeros(
dense_shape, type=dtypes.float32)
sparse_not_empty = dense_to_csr_sparse_matrix(not_empty)
gradients_empty_softmax = sparse_csr_matrix_ops.sparse_matrix_softmax_grad(
sparse_empty, sparse_not_empty, dtypes.float32)
gradients_empty_grad_softmax = (
sparse_csr_matrix_ops.sparse_matrix_softmax_grad(
sparse_not_empty, sparse_empty, dtypes.float32))
gradients_empty_both = sparse_csr_matrix_ops.sparse_matrix_softmax_grad(
sparse_empty, sparse_empty, dtypes.float32)
ges = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
gradients_empty_softmax, dtypes.float32)
gegs = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
gradients_empty_grad_softmax, dtypes.float32)
geb = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
gradients_empty_both, dtypes.float32)
ges_v, gegs_v, geb_v = self.evaluate((ges, gegs, geb))
for v in (ges_v, gegs_v, geb_v):
self.assertAllEqual(np.zeros(dense_shape), v)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchConj(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (np.real(m) > 0)
dense_shape = [53, 65, 127]
matrices = (
sparsify(np.random.randn(*dense_shape)) +
1j * np.random.randn(*dense_shape))
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
matrices_t = matrices.astype(dtype.as_numpy_dtype)
expected = np.conj(matrices_t)
sparse_matrices = dense_to_csr_sparse_matrix(matrices_t)
conj_sparse_matrices = math_ops.conj(sparse_matrices)
dense_conj_matrices = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
conj_sparse_matrices, dtype)
conj_values = self.evaluate(dense_conj_matrices)
self.assertAllClose(expected, conj_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixMulScalar(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
a_dense_shape = [53, 65, 127]
a_mats = sparsify(np.random.randn(*a_dense_shape)).astype(np.float32)
b = np.float32(3.5)
expected = a_mats * b
a_sm = dense_to_csr_sparse_matrix(a_mats)
c_t = sparse_csr_matrix_ops.sparse_matrix_mul(a_sm, b)
c_dense_t = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_t, dtypes.float32)
c_dense_t_value = self.evaluate(c_dense_t)
self.assertAllClose(expected, c_dense_t_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixMulVec(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
a_dense_shape = [53, 65, 127]
a_mats = sparsify(np.random.randn(*a_dense_shape)).astype(np.float32)
b = np.random.randn(53, 1, 1).astype(np.float32)
expected = a_mats * b
a_sm = dense_to_csr_sparse_matrix(a_mats)
c_t = sparse_csr_matrix_ops.sparse_matrix_mul(a_sm, b)
c_dense_t = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_t, dtypes.float32)
c_dense_t_value = self.evaluate(c_dense_t)
self.assertAllClose(expected, c_dense_t_value)
@test_util.run_in_graph_and_eager_modes
def testSparseCholesky(self):
dense_matrix = np.array([
[2, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 0, 0],
[1, 1, 7, 0, 0, 0],
[0, 0, 0, 4, 0, 0],
[0, 0, 1, 0, 5, 0],
[0, 0, 2, 0, 1, 6],
]).astype(np.complex128)
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
with test_util.force_cpu():
if dtype.is_complex:
dense_matrix += 0.5j * np.tril(dense_matrix, -1)
sparse_matrix = dense_to_csr_sparse_matrix(
math_ops.cast(dense_matrix, dtype))
# Obtain the Sparse Cholesky factor using AMD Ordering for reducing
# fill-in.
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_sparse_matrices = (
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtype))
dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
cholesky_sparse_matrices, dtype)
# Compute L * Lh where L is the Sparse Cholesky factor.
verification = test_util.matmul_without_tf32(
dense_cholesky, array_ops.transpose(dense_cholesky, conjugate=True))
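        # Map L * Lh back to the original row/column ordering so it can be
        # compared entry-wise with the input matrix.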
verification = twist_matrix(verification, ordering_amd)
# Assert that input matrix A satisfies A = L * Lh.
verification_values = self.evaluate(verification)
full_dense_matrix = (
dense_matrix +
np.conjugate(np.transpose(np.tril(dense_matrix, -1))))
self.assertAllClose(full_dense_matrix, verification_values)
@test_util.run_in_graph_and_eager_modes
def testBatchSparseCholesky(self):
dense_mat = np.array([
# A diagonal matrix.
[
[1, 0, 0, 0], #
[0, 2, 0, 0], #
[0, 0, 3, 0], #
[0, 0, 0, 4],
], #
# A tridiagonal hermitian matrix.
[
[5 + 0j, 1 + 0j, 0 + 0j, 0 + 0j], #
[1 + 0j, 4 + 0j, 1 + 2j, 0 + 0j], #
[0 + 0j, 1 - 2j, 9 + 0j, 3 - 3j], #
[0 + 0j, 0 + 0j, 3 + 3j, 7 + 0j],
], #
# A diagonal matrix with a corner element; for which
# OrderingAMD returns a non-identity permutation.
[
[1, 0, 0, 1.], #
[0, 2, 0, 0.], #
[0, 0, 3, 0.], #
[1, 0, 0, 4.],
] #
]).astype(np.complex128)
data_types = [dtypes.float32, dtypes.float64]
if not test.is_built_with_rocm():
# complex type is not supported on the ROCm platform
data_types += [dtypes.complex64, dtypes.complex128]
for dtype in data_types:
sparse_matrix = dense_to_csr_sparse_matrix(
math_ops.cast(dense_mat, dtype))
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_sparse_matrix = (
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtype))
dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
cholesky_sparse_matrix, dtype)
# Compute L * Lh.
verification = test_util.matmul_without_tf32(
dense_cholesky,
array_ops.transpose(dense_cholesky, perm=[0, 2, 1], conjugate=True))
verification = twist_matrix(verification, ordering_amd)
verification_values = self.evaluate(verification)
self.assertAllClose(
dense_mat.astype(dtype.as_numpy_dtype), verification_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseCholesky(self):
sparsity = 0.1
sparsify = lambda m: m * (m > 1 - sparsity)
batch_size = 53
num_rows = 147
dense_shape = [batch_size, num_rows, num_rows]
dense_matrix = sparsify(np.random.uniform(size=dense_shape)).astype(
np.float32)
# Create a "random" SPD matrix, by choosing each entry of A between
# 0 and 1 at the specified density, and computing 0.5(A + At) + n*I.
# This ensures diagonal dominance which implies positive-definiteness.
dense_matrix = (
0.5 *
(dense_matrix + array_ops.transpose(dense_matrix, perm=[0, 2, 1])) +
num_rows * linalg_ops.eye(dense_shape[-1], batch_shape=[batch_size]))
# Compute the fill-in reducing permutation and use it to perform
# the Sparse Cholesky factorization.
sparse_matrix = dense_to_csr_sparse_matrix(dense_matrix)
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_sparse_matrix = \
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtypes.float32)
dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
cholesky_sparse_matrix, dtypes.float32)
# Compute L * Lh.
verification = test_util.matmul_without_tf32(
dense_cholesky, array_ops.transpose(dense_cholesky, perm=[0, 2, 1]))
verification = twist_matrix(verification, ordering_amd)
verification_values = self.evaluate(verification)
self.assertAllClose(dense_matrix, verification_values, atol=1e-5, rtol=1e-5)
@test_util.run_in_graph_and_eager_modes
def testSparseCholesky_InvalidMatrix(self):
# Verify that non-SPD matrices result in an Invalid Argument error.
invalid_matrices = [
# zero matrix.
np.array([
[0., 0., 0., 0.], #
[0., 0., 0., 0.], #
[0., 0., 0., 0.], #
[0., 0., 0., 0.] #
]),
# zero diagonal entry.
np.array([
[9., 0., 5., 0.], #
[0., 0., 0., 1.], #
[5., 0., 8., 0.], #
[0., 1., 0., 7.] #
]),
# not positive definite.
np.array([
[2., -2., 0., 0.], #
[-2., 2., 0., 0.], #
[0., 0., 3., -3.], #
[0., 0., -3., 3.] #
]),
]
with test_util.force_cpu():
for invalid_matrix in invalid_matrices:
with self.assertRaises(errors.InvalidArgumentError):
sparse_matrix = dense_to_csr_sparse_matrix(
invalid_matrix.astype(np.float32))
# Compute the fill-in reducing permutation and use it to perform
# the Sparse Cholesky factorization.
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_sparse_matrices = (
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtypes.float32))
# Convert the Cholesky factor to a dense matrix to be evaluated.
dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
cholesky_sparse_matrices, type=dtypes.float32)
self.evaluate(dense_cholesky)
@test_util.run_in_graph_and_eager_modes
def testOrderingAMD(self):
num_rows = 6
# An SPD matrix where AMD ordering can reduce fill-in for Cholesky factor.
dense_matrix = np.array([
[7, 0, 0, 0, 0, 0],
[1, 4, 0, 0, 0, 0],
[1, 1, 3, 0, 0, 0],
[0, 0, 0, 4, 0, 0],
[2, 0, 0, 0, 5, 0],
[1, 2, 2, 0, 0, 6],
]).astype(np.float32)
with test_util.force_cpu():
sparse_matrix = dense_to_csr_sparse_matrix(dense_matrix)
# Obtain the Sparse Cholesky factor with the identity permutation as the
# fill-in reducing ordering.
cholesky_without_ordering = (
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, math_ops.range(num_rows), type=dtypes.float32))
cholesky_without_ordering_nnz = sparse_csr_matrix_ops.sparse_matrix_nnz(
cholesky_without_ordering)
# Obtain the Sparse Cholesky factor using AMD Ordering for reducing
# fill-in.
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_with_amd = sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtypes.float32)
cholesky_with_amd_nnz = sparse_csr_matrix_ops.sparse_matrix_nnz(
cholesky_with_amd)
(ordering_amd_value, cholesky_with_amd_nnz_value,
cholesky_without_ordering_nnz_value) = self.evaluate(
[ordering_amd, cholesky_with_amd_nnz, cholesky_without_ordering_nnz])
# AMD ordering should return a valid permutation.
self.assertAllClose(np.arange(num_rows), np.sort(ordering_amd_value))
# Check that cholesky with AMD ordering has a strictly lower nonzero count
# for this matrix.
self.assertLess(cholesky_with_amd_nnz_value,
cholesky_without_ordering_nnz_value)
class CSRSparseMatrixOpsBenchmark(test.Benchmark):
def benchmark_sparse_matrix_mat_mul_gpu(self):
if not test_util.is_gpu_available():
return
sparsify = lambda m: array_ops.where(m > 2, m, array_ops.zeros_like(m))
# XW, X dense and W sparse
# X is shaped [{1, 8, 16}, 2000]
# W is shaped [2000, 4000]
for batch_size in [1, 8, 16]:
x_dense_shape = [batch_size, 2000]
w_dense_shape = [2000, 4000]
with ops.Graph().as_default(), ops.device("/gpu:0"):
x_mats = random_ops.random_normal(x_dense_shape, dtype=dtypes.float32)
w_mats = sparsify(
random_ops.random_normal(w_dense_shape, dtype=dtypes.float32))
nnz = array_ops.shape(array_ops.where(w_mats))[0]
ratio = math_ops.cast(nnz, dtypes.float32) / np.prod(w_dense_shape)
w_sm = dense_to_csr_sparse_matrix(w_mats)
with ops.name_scope("w_sm_var"):
w_sm_var = variable_scope.get_variable(
"sm", initializer=w_sm, use_resource=True)
w_sm_var_v = w_sm_var.read_value()
with ops.name_scope("w_var"):
w_var = variable_scope.get_variable(
"sm_dense", initializer=w_mats, use_resource=True)
w_var_v = w_var.read_value()
with ops.name_scope("b"):
x = variable_scope.get_variable(
"b", initializer=x_mats, use_resource=True)
x_v = x.read_value()
# X*W = (W'*X')'
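        # The CSR kernel computes sparse * dense, so W' * X' is formed with
        # both inputs transposed and the output transposed back to get X * W.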
xw_sparse = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
w_sm_var_v,
x_v,
transpose_a=True,
transpose_b=True,
transpose_output=True)
xw_dense = math_ops.matmul(x_v, w_var_v)
with session.Session() as sess:
self.evaluate(
[w_var.initializer, w_sm_var.initializer, x.initializer])
nnz_value, ratio_value = self.evaluate((nnz, ratio))
name_template = (
"sparse_matrix_mat_mul_gpu_%s_W_2000x4000_batch_size_%d")
self.run_op_benchmark(
sess,
xw_sparse.op,
name=name_template % ("sparse", batch_size),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=50)
self.run_op_benchmark(
sess,
xw_dense.op,
name=name_template % ("dense", batch_size),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=50)
def benchmark_sparse_matrix_mat_vec_mul(self):
# num_rows, device, transpose.
cases = [
[2000, CPU, False],
[8000, CPU, False],
[12000, CPU, False],
[2000, CPU, True],
[8000, CPU, True],
[12000, CPU, True],
]
seed = 42
for num_rows, device, transpose in cases:
if device == GPU and not test_util.is_gpu_available():
continue
for num_threads in [1, 2, 4, 6, 8, 10]:
device_str = "cpu" if device == CPU else "gpu"
w_dense_shape = [num_rows, num_rows]
x_dense_shape = [num_rows, 1]
with ops.Graph().as_default(), ops.device(device):
random_seed.set_random_seed(seed)
x = random_ops.random_normal(x_dense_shape, dtype=dtypes.float32)
w_np = sparse.rand(
w_dense_shape[0],
w_dense_shape[1],
density=0.01,
dtype=np.float32,
random_state=np.random.RandomState(seed))
w_st = sparse_tensor.SparseTensor(
zip(w_np.row, w_np.col), w_np.data, w_np.shape)
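          # Reorder the SparseTensor into canonical row-major order before
          # converting it to a CSR sparse matrix.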
w_st = sparse_ops.sparse_reorder(w_st)
nnz = array_ops.shape(w_st.values)[0]
ratio = math_ops.cast(nnz, dtypes.float32) / np.prod(w_np.shape)
w_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
w_st.indices, w_st.values, w_st.dense_shape)
xw_sparse_matrix = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
w_sm,
x,
transpose_a=transpose,
transpose_b=False,
transpose_output=False)
xw_sparse_tensor = sparse_ops.sparse_tensor_dense_matmul(
w_st, x, adjoint_a=transpose, adjoint_b=False)
with session.Session(
config=config_pb2.ConfigProto(
intra_op_parallelism_threads=num_threads)) as sess:
nnz_value, ratio_value = sess.run((nnz, ratio))
name_template = ("mat_vec_mul_%s_%s_W_%d_transpose_%s_threads_%d")
self.run_op_benchmark(
sess,
xw_sparse_matrix.op,
name=name_template %
(device_str, "sparse_matrix", num_rows, transpose, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value,
},
min_iters=10)
self.run_op_benchmark(
sess,
xw_sparse_tensor.op,
name=name_template %
(device_str, "sparse_tensor", num_rows, transpose, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value,
},
min_iters=10)
def benchmark_sparse_matrix_sparse_matmul(self):
density = 0.05
# pylint: disable=g-long-lambda
sparsify = lambda m: array_ops.where(m > 1. - density, m,
array_ops.zeros_like(m))
# pylint: enable=g-long-lambda
for batch_size in [1, 16]:
for num_threads in [1, 4, 12]:
dense_shape = [batch_size, 250, 250]
for device in [CPU, GPU]:
if device == GPU and not test_util.is_gpu_available():
continue
with ops.Graph().as_default(), ops.device(device):
x_mats = sparsify(
random_ops.random_uniform(dense_shape, dtype=dtypes.float32))
y_mats = sparsify(
random_ops.random_uniform(dense_shape, dtype=dtypes.float32))
nnz = array_ops.shape(array_ops.where(x_mats))[0] + array_ops.shape(
array_ops.where(y_mats))[0]
ratio = math_ops.cast(nnz,
dtypes.float32) / (2 * np.prod(dense_shape))
x_sm = dense_to_csr_sparse_matrix(x_mats)
y_sm = dense_to_csr_sparse_matrix(y_mats)
xy_sparse = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
x_sm, y_sm, type=dtypes.float32)
with session.Session(
config=config_pb2.ConfigProto(
intra_op_parallelism_threads=num_threads)) as sess:
nnz_value, ratio_value = self.evaluate((nnz, ratio))
name_template = (
"sparse_matrix_sparse_matmul_%s_N_%d_batch_size_%d_threads_%d"
)
device_str = "cpu" if device == CPU else "gpu"
self.run_op_benchmark(
sess,
xy_sparse.op,
name=name_template %
(device_str, dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=50)
def benchmark_sparse_dense_conversion(self):
sparsity = 0.05
for batch_size in [1, 16]:
for num_threads in [1, 4, 12]:
dense_shape = [batch_size, 750, 750]
for device in [CPU, GPU]:
if device == GPU and not test_util.is_gpu_available():
continue
with ops.Graph().as_default(), ops.device(device):
mats = random_ops.random_uniform(dense_shape, dtype=dtypes.float32)
mats_locs = array_ops.where(mats > 1.0 - sparsity)
sparse_matrices = sparse_csr_matrix_ops.dense_to_csr_sparse_matrix(
mats, mats_locs)
dense_matrices = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
sparse_matrices, type=dtypes.float32)
nnz = math_ops.reduce_sum(
sparse_csr_matrix_ops.sparse_matrix_nnz(sparse_matrices))
ratio = math_ops.cast(nnz, dtypes.float32) / np.prod(dense_shape)
with session.Session(
config=config_pb2.ConfigProto(
intra_op_parallelism_threads=num_threads)) as sess:
nnz_value, ratio_value = self.evaluate((nnz, ratio))
device_str = "cpu" if device == CPU else "gpu"
name_template = (
"dense_to_sparse_matrix_%s_N_%d_batch_size_%d_num_threads_%d")
self.run_op_benchmark(
sess,
sparse_matrices.op,
name=name_template %
(device_str, dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value,
},
min_iters=50)
name_template = (
"sparse_matrix_to_dense_%s_N_%d_batch_size_%d_num_threads_%d")
self.run_op_benchmark(
sess,
dense_matrices.op,
name=name_template %
(device_str, dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value,
},
min_iters=50)
def benchmark_sparse_cholesky(self):
# TODO(anudhyan): Use conversions from SparseTensor instead of to get this
# benchmark working for larger matrices. For this to work without GPU, we
# need to write CPU kernels for SparseTensor conversions.
num_rows = 500
density = 0.01
# pylint: disable=g-long-lambda
sparsify = lambda m: array_ops.where(m > 1. - density, m,
array_ops.zeros_like(m))
# pylint: enable=g-long-lambda
for batch_size in [1, 16]:
for num_threads in [1, 4, 12]:
dense_shape = [batch_size, num_rows, num_rows]
with ops.Graph().as_default(), ops.device(CPU):
# Create a "random" SPD matrix, by choosing each entry of A between
# 0 and 1 at the specified density, and computing 0.5(A + At) + n*I.
# This ensures diagonal dominance which implies positive-definiteness.
dense_matrix = sparsify(
random_ops.random_uniform(dense_shape, dtype=dtypes.float32))
spd_dense_matrix = (
0.5 *
(dense_matrix + array_ops.transpose(dense_matrix, perm=[0, 2, 1]))
+ num_rows *
linalg_ops.eye(dense_shape[-1], batch_shape=[batch_size]))
# Convert to SparseMatrix and invoke Sparse Cholesky factorization
# with AMD Ordering.
sparse_matrix = dense_to_csr_sparse_matrix(spd_dense_matrix)
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_sparse_matrix = (
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtypes.float32))
nnz = math_ops.reduce_sum(
sparse_csr_matrix_ops.sparse_matrix_nnz(sparse_matrix))
ratio = math_ops.cast(nnz, dtypes.float32) / np.prod(dense_shape)
ordering_amd_name_template = (
"sparse_matrix_ordering_amd_cpu_N_%d_batch_size_%d_threads_%d")
sparse_cholesky_name_template = (
"sparse_matrix_sparse_cholesky_cpu_N_%d_batch_size_%d_threads_%d")
with session.Session(
config=config_pb2.ConfigProto(
intra_op_parallelism_threads=num_threads)) as sess:
nnz_value, ratio_value = self.evaluate((nnz, ratio))
self.run_op_benchmark(
sess,
ordering_amd.op,
name=ordering_amd_name_template %
(dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=25)
self.run_op_benchmark(
sess,
cholesky_sparse_matrix.op,
name=sparse_cholesky_name_template %
(dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=25)
if __name__ == "__main__":
test.main()
| apache-2.0 |
zhaohuaw/stock-logistics-warehouse | __unported__/stock_inventory_extended/__init__.py | 49 | 1072 | # -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Julius Network Solutions SARL <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
import stock
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
j3parker/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/sub/exception_in_transfer_wsh.py | 499 | 1816 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Exception in web_socket_transfer_data().
"""
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
raise Exception('Intentional Exception for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 |
jansohn/pyload | module/plugins/crypter/EmbeduploadCom.py | 2 | 2285 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.Crypter import Crypter
from module.network.HTTPRequest import BadHeader
class EmbeduploadCom(Crypter):
__name__ = "EmbeduploadCom"
__type__ = "crypter"
__version__ = "0.04"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?embedupload\.com/\?d=.+'
__config__ = [("use_subfolder" , "bool", "Save package to subfolder" , True ),
("subfolder_per_pack", "bool", "Create a subfolder for each package" , True ),
("preferedHoster" , "str" , "Prefered hoster list (bar-separated)", "embedupload"),
("ignoredHoster" , "str" , "Ignored hoster list (bar-separated)" , "" )]
__description__ = """EmbedUpload.com decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]")]
LINK_PATTERN = r'<div id="(.+?)".*?>\s*<a href="(.+?)" target="_blank" (?:class="DownloadNow"|style="color:red")>'
def decrypt(self, pyfile):
self.html = self.load(pyfile.url)
tmp_links = []
m = re.findall(self.LINK_PATTERN, self.html)
        if m:
prefered_set = set(self.get_config('preferedHoster').split('|'))
prefered_set = map(lambda s: s.lower().split('.')[0], prefered_set)
self.log_debug("PF: %s" % prefered_set)
tmp_links.extend(x[1] for x in m if x[0] in prefered_set)
self.urls = self.get_location(tmp_links)
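            # No preferred hoster produced a link; fall back to every hoster
            # that is not explicitly ignored.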
if not self.urls:
ignored_set = set(self.get_config('ignoredHoster').split('|'))
ignored_set = map(lambda s: s.lower().split('.')[0], ignored_set)
self.log_debug("IG: %s" % ignored_set)
tmp_links.extend(x[1] for x in m if x[0] not in ignored_set)
self.urls = self.get_location(tmp_links)
def get_location(self, tmp_links):
new_links = []
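        # Follow one redirect per link (header request only) to resolve the
        # real hoster download URL.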
for link in tmp_links:
try:
header = self.load(link, just_header=True)
if 'location' in header:
new_links.append(header['location'])
except BadHeader:
pass
return new_links
| gpl-3.0 |
djtaylor/cloudscape-DEPRECATED | python/cloudscape/common/iptc/xtables.py | 2 | 42886 | # -*- coding: utf-8 -*-
import ctypes as ct
import os
import sys
import weakref
import version
from util import find_library
XT_INV_PROTO = 0x40 # invert the sense of PROTO
NFPROTO_UNSPEC = 0
NFPROTO_IPV4 = 2
NFPROTO_ARP = 3
NFPROTO_BRIDGE = 7
NFPROTO_IPV6 = 10
NFPROTO_DECNET = 12
NFPROTO_NUMPROTO = 6
XTF_DONT_LOAD = 0x00
XTF_DURING_LOAD = 0x01
XTF_TRY_LOAD = 0x02
XTF_LOAD_MUST_SUCCEED = 0x03
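# Option flag bits from xtables.h: INVERT allows the option to be prefixed
# with '!', MAND makes it mandatory, MULTI allows repeated use, PUT stores
# the parsed value at ptroff, and NBO stores it in network byte order.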
XTOPT_INVERT = 1 << 0
XTOPT_MAND = 1 << 1
XTOPT_MULTI = 1 << 2
XTOPT_PUT = 1 << 3
XTOPT_NBO = 1 << 4
_WORDLEN = ct.sizeof(ct.c_long)
_XT_FUNCTION_MAXNAMELEN = 30
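# Python counterpart of the kernel's XT_ALIGN() macro: round sz up to the
# nearest multiple of the platform word size.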
def xt_align(sz):
return ((sz + (_WORDLEN - 1)) & ~(_WORDLEN - 1))
class xt_counters(ct.Structure):
"""This class is a representation of the C struct xt_counters."""
_fields_ = [("pcnt", ct.c_uint64), # packet counter
("bcnt", ct.c_uint64)] # byte counter
class xt_entry_target_user(ct.Structure):
_fields_ = [("target_size", ct.c_uint16),
("name", ct.c_char * (_XT_FUNCTION_MAXNAMELEN - 1)),
("revision", ct.c_uint8)]
class xt_entry_target_u(ct.Union):
_fields_ = [("user", xt_entry_target_user),
("target_size", ct.c_uint16)] # full length
class xt_entry_target(ct.Structure):
"""This class is a representation of the C struct xt_entry_target."""
_fields_ = [("u", xt_entry_target_u),
("data", ct.c_ubyte * 0)]
class xt_entry_match_user(ct.Structure):
_fields_ = [("match_size", ct.c_uint16),
("name", ct.c_char * (_XT_FUNCTION_MAXNAMELEN - 1)),
("revision", ct.c_uint8)]
class xt_entry_match_u(ct.Union):
_fields_ = [("user", xt_entry_match_user),
("match_size", ct.c_uint16)] # full length
class xt_entry_match(ct.Structure):
"""This class is a representation of the C struct xt_entry_match."""
_fields_ = [("u", xt_entry_match_u),
("data", ct.c_ubyte * 0)]
class xtables_globals(ct.Structure):
_fields_ = [("option_offset", ct.c_uint),
("program_name", ct.c_char_p),
("program_version", ct.c_char_p),
("orig_opts", ct.c_void_p),
("opts", ct.c_void_p),
("exit_err", ct.CFUNCTYPE(None, ct.c_int, ct.c_char_p))]
# struct used by getopt()
class option(ct.Structure):
_fields_ = [("name", ct.c_char_p),
("has_arg", ct.c_int),
("flag", ct.POINTER(ct.c_int)),
("val", ct.c_int)]
class xt_option_entry(ct.Structure):
_fields_ = [("name", ct.c_char_p),
("type", ct.c_int),
("id", ct.c_uint),
("excl", ct.c_uint),
("also", ct.c_uint),
("flags", ct.c_uint),
("ptroff", ct.c_uint),
("size", ct.c_size_t),
("min", ct.c_uint),
("max", ct.c_uint)]
class _U1(ct.Union):
_fields_ = [("match", ct.POINTER(ct.POINTER(xt_entry_match))),
("target", ct.POINTER(ct.POINTER(xt_entry_target)))]
class nf_inet_addr(ct.Union):
_fields_ = [("all", ct.c_uint32 * 4),
("ip", ct.c_uint32),
("ip6", ct.c_uint32 * 4),
("in", ct.c_uint32),
("in6", ct.c_uint8 * 16)]
class _S1(ct.Structure):
_fields_ = [("haddr", nf_inet_addr),
("hmask", nf_inet_addr),
("hlen", ct.c_uint8)]
class _S2(ct.Structure):
_fields_ = [("tos_value", ct.c_uint8),
("tos_mask", ct.c_uint8)]
class _S3(ct.Structure):
_fields_ = [("mark", ct.c_uint32),
("mask", ct.c_uint32)]
class _U_val(ct.Union):
_anonymous_ = ("s1", "s2", "s3")
_fields_ = [("u8", ct.c_uint8),
("u8_range", ct.c_uint8 * 2),
("syslog_level", ct.c_uint8),
("protocol", ct.c_uint8),
("u16", ct.c_uint16),
("u16_range", ct.c_uint16 * 2),
("port", ct.c_uint16),
("port_range", ct.c_uint16 * 2),
("u32", ct.c_uint32),
("u32_range", ct.c_uint32 * 2),
("u64", ct.c_uint64),
("u64_range", ct.c_uint64 * 2),
("double", ct.c_double),
("s1", _S1),
("s2", _S2),
("s3", _S3),
("ethermac", ct.c_uint8 * 6)]
class xt_option_call(ct.Structure):
_anonymous_ = ("u",)
_fields_ = [("arg", ct.c_char_p),
("ext_name", ct.c_char_p),
("entry", ct.POINTER(xt_option_entry)),
("data", ct.c_void_p),
("xflags", ct.c_uint),
("invert", ct.c_uint8),
("nvals", ct.c_uint8),
("val", _U_val),
("u", _U1),
("xt_entry", ct.c_void_p),
("udata", ct.c_void_p)]
class xt_fcheck_call(ct.Structure):
_fields_ = [("ext_name", ct.c_char_p),
("data", ct.c_void_p),
("udata", ct.c_void_p),
("xflags", ct.c_uint)]
class _xtables_match_v1(ct.Structure):
_fields_ = [("version", ct.c_char_p),
("next", ct.c_void_p),
("name", ct.c_char_p),
("revision", ct.c_uint8),
("family", ct.c_uint16),
("size", ct.c_size_t),
("userspacesize", ct.c_size_t),
("help", ct.CFUNCTYPE(None)),
("init", ct.CFUNCTYPE(None, ct.POINTER(xt_entry_match))),
# fourth parameter entry is struct ipt_entry for example
# int (*parse)(int c, char **argv, int invert, unsigned int
# *flags, const void *entry, struct xt_entry_match **match)
("parse", ct.CFUNCTYPE(ct.c_int, ct.c_int,
ct.POINTER(ct.c_char_p), ct.c_int,
ct.POINTER(ct.c_uint), ct.c_void_p,
ct.POINTER(ct.POINTER(
xt_entry_match)))),
("final_check", ct.CFUNCTYPE(None, ct.c_uint)),
# prints out the match iff non-NULL: put space at end
# first parameter ip is struct ipt_ip * for example
("print", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_match), ct.c_int)),
# saves the match info in parsable form to stdout.
# first parameter ip is struct ipt_ip * for example
("save", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_match))),
# pointer to list of extra command-line options
("extra_opts", ct.POINTER(option)),
("option_offset", ct.c_uint),
("m", ct.POINTER(xt_entry_match)),
("mflags", ct.c_uint),
("loaded", ct.c_uint)]
x6_parse = None
x6_fcheck = None
x6_options = None
_xtables_match_v2 = _xtables_match_v1
_xtables_match_v4 = _xtables_match_v1
_xtables_match_v5 = _xtables_match_v1
class _xtables_match_v6(ct.Structure):
_fields_ = [("version", ct.c_char_p),
("next", ct.c_void_p),
("name", ct.c_char_p),
("revision", ct.c_uint8),
("family", ct.c_uint16),
("size", ct.c_size_t),
("userspacesize", ct.c_size_t),
("help", ct.CFUNCTYPE(None)),
("init", ct.CFUNCTYPE(None, ct.POINTER(xt_entry_match))),
# fourth parameter entry is struct ipt_entry for example
# int (*parse)(int c, char **argv, int invert, unsigned int
# *flags, const void *entry, struct xt_entry_match **match)
("parse", ct.CFUNCTYPE(ct.c_int, ct.c_int,
ct.POINTER(ct.c_char_p), ct.c_int,
ct.POINTER(ct.c_uint), ct.c_void_p,
ct.POINTER(ct.POINTER(
xt_entry_match)))),
("final_check", ct.CFUNCTYPE(None, ct.c_uint)),
# prints out the match iff non-NULL: put space at end
# first parameter ip is struct ipt_ip * for example
("print", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_match), ct.c_int)),
# saves the match info in parsable form to stdout.
# first parameter ip is struct ipt_ip * for example
("save", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_match))),
# pointer to list of extra command-line options
("extra_opts", ct.POINTER(option)),
# introduced with the new iptables API
("x6_parse", ct.CFUNCTYPE(None, ct.POINTER(xt_option_call))),
("x6_fcheck", ct.CFUNCTYPE(None, ct.POINTER(xt_fcheck_call))),
("x6_options", ct.POINTER(xt_option_entry)),
("option_offset", ct.c_uint),
("m", ct.POINTER(xt_entry_match)),
("mflags", ct.c_uint),
("loaded", ct.c_uint)]
class _xtables_match_v7(ct.Structure):
_fields_ = [("version", ct.c_char_p),
("next", ct.c_void_p),
("name", ct.c_char_p),
("revision", ct.c_uint8),
("family", ct.c_uint16),
("size", ct.c_size_t),
("userspacesize", ct.c_size_t),
("help", ct.CFUNCTYPE(None)),
("init", ct.CFUNCTYPE(None, ct.POINTER(xt_entry_match))),
# fourth parameter entry is struct ipt_entry for example
# int (*parse)(int c, char **argv, int invert, unsigned int
# *flags, const void *entry, struct xt_entry_match **match)
("parse", ct.CFUNCTYPE(ct.c_int, ct.c_int,
ct.POINTER(ct.c_char_p), ct.c_int,
ct.POINTER(ct.c_uint), ct.c_void_p,
ct.POINTER(ct.POINTER(
xt_entry_match)))),
("final_check", ct.CFUNCTYPE(None, ct.c_uint)),
# prints out the match iff non-NULL: put space at end
# first parameter ip is struct ipt_ip * for example
("print", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_match), ct.c_int)),
# saves the match info in parsable form to stdout.
# first parameter ip is struct ipt_ip * for example
("save", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_match))),
# pointer to list of extra command-line options
("extra_opts", ct.POINTER(option)),
# introduced with the new iptables API
("x6_parse", ct.CFUNCTYPE(None, ct.POINTER(xt_option_call))),
("x6_fcheck", ct.CFUNCTYPE(None, ct.POINTER(xt_fcheck_call))),
("x6_options", ct.POINTER(xt_option_entry)),
# size of per-extension instance extra "global" scratch space
("udata_size", ct.c_size_t),
# ignore these men behind the curtain:
("udata", ct.c_void_p),
("option_offset", ct.c_uint),
("m", ct.POINTER(xt_entry_match)),
("mflags", ct.c_uint),
("loaded", ct.c_uint)]
class _xtables_match_v9(ct.Structure):
_fields_ = [("version", ct.c_char_p),
("next", ct.c_void_p),
("name", ct.c_char_p),
("real_name", ct.c_char_p),
("revision", ct.c_uint8),
("family", ct.c_uint16),
("size", ct.c_size_t),
("userspacesize", ct.c_size_t),
("help", ct.CFUNCTYPE(None)),
("init", ct.CFUNCTYPE(None, ct.POINTER(xt_entry_match))),
# fourth parameter entry is struct ipt_entry for example
# int (*parse)(int c, char **argv, int invert, unsigned int
# *flags, const void *entry, struct xt_entry_match **match)
("parse", ct.CFUNCTYPE(ct.c_int, ct.c_int,
ct.POINTER(ct.c_char_p), ct.c_int,
ct.POINTER(ct.c_uint), ct.c_void_p,
ct.POINTER(ct.POINTER(
xt_entry_match)))),
("final_check", ct.CFUNCTYPE(None, ct.c_uint)),
# prints out the match iff non-NULL: put space at end
# first parameter ip is struct ipt_ip * for example
("print", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_match), ct.c_int)),
# saves the match info in parsable form to stdout.
# first parameter ip is struct ipt_ip * for example
("save", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_match))),
# pointer to list of extra command-line options
("extra_opts", ct.POINTER(option)),
# introduced with the new iptables API
("x6_parse", ct.CFUNCTYPE(None, ct.POINTER(xt_option_call))),
("x6_fcheck", ct.CFUNCTYPE(None, ct.POINTER(xt_fcheck_call))),
("x6_options", ct.POINTER(xt_option_entry)),
# size of per-extension instance extra "global" scratch space
("udata_size", ct.c_size_t),
# ignore these men behind the curtain:
("udata", ct.c_void_p),
("option_offset", ct.c_uint),
("m", ct.POINTER(xt_entry_match)),
("mflags", ct.c_uint),
("loaded", ct.c_uint)]
class _xtables_match_v10(ct.Structure):
_fields_ = [("version", ct.c_char_p),
("next", ct.c_void_p),
("name", ct.c_char_p),
("real_name", ct.c_char_p),
("revision", ct.c_uint8),
("ext_flags", ct.c_uint8),
("family", ct.c_uint16),
("size", ct.c_size_t),
("userspacesize", ct.c_size_t),
("help", ct.CFUNCTYPE(None)),
("init", ct.CFUNCTYPE(None, ct.POINTER(xt_entry_match))),
# fourth parameter entry is struct ipt_entry for example
# int (*parse)(int c, char **argv, int invert, unsigned int
# *flags, const void *entry, struct xt_entry_match **match)
("parse", ct.CFUNCTYPE(ct.c_int, ct.c_int,
ct.POINTER(ct.c_char_p), ct.c_int,
ct.POINTER(ct.c_uint), ct.c_void_p,
ct.POINTER(ct.POINTER(
xt_entry_match)))),
("final_check", ct.CFUNCTYPE(None, ct.c_uint)),
# prints out the match iff non-NULL: put space at end
# first parameter ip is struct ipt_ip * for example
("print", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_match), ct.c_int)),
# saves the match info in parsable form to stdout.
# first parameter ip is struct ipt_ip * for example
("save", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_match))),
# Print match name or alias
("alias", ct.CFUNCTYPE(ct.c_char_p, ct.POINTER(xt_entry_match))),
# pointer to list of extra command-line options
("extra_opts", ct.POINTER(option)),
# introduced with the new iptables API
("x6_parse", ct.CFUNCTYPE(None, ct.POINTER(xt_option_call))),
("x6_fcheck", ct.CFUNCTYPE(None, ct.POINTER(xt_fcheck_call))),
("x6_options", ct.POINTER(xt_option_entry)),
# size of per-extension instance extra "global" scratch space
("udata_size", ct.c_size_t),
# ignore these men behind the curtain:
("udata", ct.c_void_p),
("option_offset", ct.c_uint),
("m", ct.POINTER(xt_entry_match)),
("mflags", ct.c_uint),
("loaded", ct.c_uint)]
class xtables_match(ct.Union):
_fields_ = [("v1", _xtables_match_v1),
("v2", _xtables_match_v2),
# Apparently v3 was skipped
("v4", _xtables_match_v4),
("v5", _xtables_match_v5),
("v6", _xtables_match_v6),
("v7", _xtables_match_v7),
# Apparently v8 was skipped
("v9", _xtables_match_v9),
("v10", _xtables_match_v10)]
class _xtables_target_v1(ct.Structure):
_fields_ = [("version", ct.c_char_p),
("next", ct.c_void_p),
("name", ct.c_char_p),
("revision", ct.c_uint8),
("family", ct.c_uint16),
("size", ct.c_size_t),
("userspacesize", ct.c_size_t),
("help", ct.CFUNCTYPE(None)),
("init", ct.CFUNCTYPE(None, ct.POINTER(xt_entry_target))),
# fourth parameter entry is struct ipt_entry for example
# int (*parse)(int c, char **argv, int invert,
# unsigned int *flags, const void *entry,
# struct xt_entry_target **target)
("parse", ct.CFUNCTYPE(ct.c_int,
ct.POINTER(ct.c_char_p), ct.c_int,
ct.POINTER(ct.c_uint), ct.c_void_p,
ct.POINTER(ct.POINTER(
xt_entry_target)))),
("final_check", ct.CFUNCTYPE(None, ct.c_uint)),
# prints out the target iff non-NULL: put space at end
# first parameter ip is struct ipt_ip * for example
("print", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_target), ct.c_int)),
# saves the target info in parsable form to stdout.
# first parameter ip is struct ipt_ip * for example
("save", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_target))),
# pointer to list of extra command-line options
("extra_opts", ct.POINTER(option)),
("option_offset", ct.c_uint),
("t", ct.POINTER(xt_entry_target)),
("tflags", ct.c_uint),
("used", ct.c_uint),
("loaded", ct.c_uint)]
x6_parse = None
x6_fcheck = None
x6_options = None
_xtables_target_v2 = _xtables_target_v1
_xtables_target_v4 = _xtables_target_v1
_xtables_target_v5 = _xtables_target_v1
class _xtables_target_v6(ct.Structure):
_fields_ = [("version", ct.c_char_p),
("next", ct.c_void_p),
("name", ct.c_char_p),
("revision", ct.c_uint8),
("family", ct.c_uint16),
("size", ct.c_size_t),
("userspacesize", ct.c_size_t),
("help", ct.CFUNCTYPE(None)),
("init", ct.CFUNCTYPE(None, ct.POINTER(xt_entry_target))),
# fourth parameter entry is struct ipt_entry for example
# int (*parse)(int c, char **argv, int invert,
# unsigned int *flags, const void *entry,
# struct xt_entry_target **target)
("parse", ct.CFUNCTYPE(ct.c_int,
ct.POINTER(ct.c_char_p), ct.c_int,
ct.POINTER(ct.c_uint), ct.c_void_p,
ct.POINTER(ct.POINTER(
xt_entry_target)))),
("final_check", ct.CFUNCTYPE(None, ct.c_uint)),
# prints out the target iff non-NULL: put space at end
# first parameter ip is struct ipt_ip * for example
("print", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_target), ct.c_int)),
# saves the target info in parsable form to stdout.
# first parameter ip is struct ipt_ip * for example
("save", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_target))),
# pointer to list of extra command-line options
("extra_opts", ct.POINTER(option)),
# introduced with the new iptables API
("x6_parse", ct.CFUNCTYPE(None, ct.POINTER(xt_option_call))),
("x6_fcheck", ct.CFUNCTYPE(None, ct.POINTER(xt_fcheck_call))),
("x6_options", ct.POINTER(xt_option_entry)),
("option_offset", ct.c_uint),
("t", ct.POINTER(xt_entry_target)),
("tflags", ct.c_uint),
("used", ct.c_uint),
("loaded", ct.c_uint)]
class _xtables_target_v7(ct.Structure):
_fields_ = [("version", ct.c_char_p),
("next", ct.c_void_p),
("name", ct.c_char_p),
("revision", ct.c_uint8),
("family", ct.c_uint16),
("size", ct.c_size_t),
("userspacesize", ct.c_size_t),
("help", ct.CFUNCTYPE(None)),
("init", ct.CFUNCTYPE(None, ct.POINTER(xt_entry_target))),
# fourth parameter entry is struct ipt_entry for example
# int (*parse)(int c, char **argv, int invert,
# unsigned int *flags, const void *entry,
# struct xt_entry_target **target)
("parse", ct.CFUNCTYPE(ct.c_int,
ct.POINTER(ct.c_char_p), ct.c_int,
ct.POINTER(ct.c_uint), ct.c_void_p,
ct.POINTER(ct.POINTER(
xt_entry_target)))),
("final_check", ct.CFUNCTYPE(None, ct.c_uint)),
# prints out the target iff non-NULL: put space at end
# first parameter ip is struct ipt_ip * for example
("print", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_target), ct.c_int)),
# saves the target info in parsable form to stdout.
# first parameter ip is struct ipt_ip * for example
("save", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_target))),
# pointer to list of extra command-line options
("extra_opts", ct.POINTER(option)),
# introduced with the new iptables API
("x6_parse", ct.CFUNCTYPE(None, ct.POINTER(xt_option_call))),
("x6_fcheck", ct.CFUNCTYPE(None, ct.POINTER(xt_fcheck_call))),
("x6_options", ct.POINTER(xt_option_entry)),
# size of per-extension instance extra "global" scratch space
("udata_size", ct.c_size_t),
# ignore these men behind the curtain:
("udata", ct.c_void_p),
("option_offset", ct.c_uint),
("t", ct.POINTER(xt_entry_target)),
("tflags", ct.c_uint),
("used", ct.c_uint),
("loaded", ct.c_uint)]
class _xtables_target_v9(ct.Structure):
_fields_ = [("version", ct.c_char_p),
("next", ct.c_void_p),
("name", ct.c_char_p),
("real_name", ct.c_char_p),
("revision", ct.c_uint8),
("family", ct.c_uint16),
("size", ct.c_size_t),
("userspacesize", ct.c_size_t),
("help", ct.CFUNCTYPE(None)),
("init", ct.CFUNCTYPE(None, ct.POINTER(xt_entry_target))),
# fourth parameter entry is struct ipt_entry for example
# int (*parse)(int c, char **argv, int invert,
# unsigned int *flags, const void *entry,
# struct xt_entry_target **target)
("parse", ct.CFUNCTYPE(ct.c_int,
ct.POINTER(ct.c_char_p), ct.c_int,
ct.POINTER(ct.c_uint), ct.c_void_p,
ct.POINTER(ct.POINTER(
xt_entry_target)))),
("final_check", ct.CFUNCTYPE(None, ct.c_uint)),
# prints out the target iff non-NULL: put space at end
# first parameter ip is struct ipt_ip * for example
("print", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_target), ct.c_int)),
# saves the target info in parsable form to stdout.
# first parameter ip is struct ipt_ip * for example
("save", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_target))),
# pointer to list of extra command-line options
("extra_opts", ct.POINTER(option)),
# introduced with the new iptables API
("x6_parse", ct.CFUNCTYPE(None, ct.POINTER(xt_option_call))),
("x6_fcheck", ct.CFUNCTYPE(None, ct.POINTER(xt_fcheck_call))),
("x6_options", ct.POINTER(xt_option_entry)),
# size of per-extension instance extra "global" scratch space
("udata_size", ct.c_size_t),
# ignore these men behind the curtain:
("udata", ct.c_void_p),
("option_offset", ct.c_uint),
("t", ct.POINTER(xt_entry_target)),
("tflags", ct.c_uint),
("used", ct.c_uint),
("loaded", ct.c_uint)]
class _xtables_target_v10(ct.Structure):
_fields_ = [("version", ct.c_char_p),
("next", ct.c_void_p),
("name", ct.c_char_p),
("real_name", ct.c_char_p),
("revision", ct.c_uint8),
("ext_flags", ct.c_uint8),
("family", ct.c_uint16),
("size", ct.c_size_t),
("userspacesize", ct.c_size_t),
("help", ct.CFUNCTYPE(None)),
("init", ct.CFUNCTYPE(None, ct.POINTER(xt_entry_target))),
# fourth parameter entry is struct ipt_entry for example
# int (*parse)(int c, char **argv, int invert,
# unsigned int *flags, const void *entry,
# struct xt_entry_target **target)
("parse", ct.CFUNCTYPE(ct.c_int,
ct.POINTER(ct.c_char_p), ct.c_int,
ct.POINTER(ct.c_uint), ct.c_void_p,
ct.POINTER(ct.POINTER(
xt_entry_target)))),
("final_check", ct.CFUNCTYPE(None, ct.c_uint)),
# prints out the target iff non-NULL: put space at end
# first parameter ip is struct ipt_ip * for example
("print", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_target), ct.c_int)),
# saves the target info in parsable form to stdout.
# first parameter ip is struct ipt_ip * for example
("save", ct.CFUNCTYPE(None, ct.c_void_p,
ct.POINTER(xt_entry_target))),
# Print target name or alias
("alias", ct.CFUNCTYPE(ct.c_char_p, ct.POINTER(xt_entry_target))),
# pointer to list of extra command-line options
("extra_opts", ct.POINTER(option)),
# introduced with the new iptables API
("x6_parse", ct.CFUNCTYPE(None, ct.POINTER(xt_option_call))),
("x6_fcheck", ct.CFUNCTYPE(None, ct.POINTER(xt_fcheck_call))),
("x6_options", ct.POINTER(xt_option_entry)),
# size of per-extension instance extra "global" scratch space
("udata_size", ct.c_size_t),
# ignore these men behind the curtain:
("udata", ct.c_void_p),
("option_offset", ct.c_uint),
("t", ct.POINTER(xt_entry_target)),
("tflags", ct.c_uint),
("used", ct.c_uint),
("loaded", ct.c_uint)]
class xtables_target(ct.Union):
_fields_ = [("v1", _xtables_target_v1),
("v2", _xtables_target_v2),
# Apparently v3 was skipped
("v4", _xtables_target_v4),
("v5", _xtables_target_v5),
("v6", _xtables_target_v6),
("v7", _xtables_target_v7),
# Apparently v8 was skipped
("v9", _xtables_target_v9),
("v10", _xtables_target_v10)]
class XTablesError(Exception):
"""Raised when an xtables call fails for some reason."""
_libc, _ = find_library("c")
_optind = ct.c_long.in_dll(_libc, "optind")
_optarg = ct.c_char_p.in_dll(_libc, "optarg")
_lib_xtables, _xtables_version = find_library("xtables")
_xtables_libdir = os.getenv("XTABLES_LIBDIR")
if _xtables_libdir is None:
import os.path
for xtdir in ["/lib/xtables", "/lib64/xtables", "/usr/lib/xtables",
"/usr/lib/iptables", "/usr/lib64/xtables",
"/usr/lib64/iptables", "/usr/local/lib/xtables"]:
if os.path.isdir(xtdir):
_xtables_libdir = xtdir
break
if _xtables_libdir is None:
raise XTablesError("can't find directory with extensions; "
"please set XTABLES_LIBDIR")
_lib_xtwrapper, _ = find_library("xtwrapper")
_throw = _lib_xtwrapper.throw_exception
_wrap_parse = _lib_xtwrapper.wrap_parse
_wrap_parse.restype = ct.c_int
_wrap_parse.argtypes = [ct.c_void_p, ct.c_int, ct.POINTER(ct.c_char_p),
ct.c_int, ct.POINTER(ct.c_uint), ct.c_void_p,
ct.POINTER(ct.c_void_p)]
_wrap_save = _lib_xtwrapper.wrap_save
_wrap_save.restype = ct.c_void_p
_wrap_save.argtypes = [ct.c_void_p, ct.c_void_p, ct.c_void_p]
_wrap_uintfn = _lib_xtwrapper.wrap_uintfn
_wrap_uintfn.restype = ct.c_int
_wrap_uintfn.argtypes = [ct.c_void_p, ct.c_uint]
_wrap_voidfn = _lib_xtwrapper.wrap_voidfn
_wrap_voidfn.restype = ct.c_int
_wrap_voidfn.argtypes = [ct.c_void_p]
_wrap_x6fn = _lib_xtwrapper.wrap_x6fn
_wrap_x6fn.restype = ct.c_int
_wrap_x6fn.argtypes = [ct.c_void_p, ct.c_void_p]
_kernel_version = ct.c_int.in_dll(_lib_xtwrapper, 'kernel_version')
_get_kernel_version = _lib_xtwrapper.get_kernel_version
_get_kernel_version()
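# libxtables reports fatal errors through an exit callback; instead of letting it
# terminate the interpreter, the callback below re-raises the error as a Python
# exception via the C helper _throw().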
def _xt_exit(status, *args):
_throw(status)
_EXIT_FN = ct.CFUNCTYPE(None, ct.c_int, ct.c_char_p)
_xt_exit = _EXIT_FN(_xt_exit)
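# Decorator for xtables methods: restore this instance's per-protocol libxtables
# globals before the wrapped call and save them back afterwards, even when the
# call raises.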
def preserve_globals(fn):
def new(*args):
obj = args[0]
obj._restore_globals()
try:
ret = fn(*args)
except Exception:
obj._save_globals()
raise
obj._save_globals()
return ret
return new
class xtables(object):
_xtables_init_all = _lib_xtables.xtables_init_all
_xtables_init_all.restype = ct.c_int
_xtables_init_all.argtypes = [ct.POINTER(xtables_globals), ct.c_uint8]
_xtables_find_match = _lib_xtables.xtables_find_match
_xtables_find_match.restype = ct.POINTER(xtables_match)
_xtables_find_match.argtypes = [ct.c_char_p, ct.c_int, ct.c_void_p]
_xtables_find_target = _lib_xtables.xtables_find_target
_xtables_find_target.restype = ct.POINTER(xtables_target)
_xtables_find_target.argtypes = [ct.c_char_p, ct.c_int]
_xtables_set_nfproto = _lib_xtables.xtables_set_nfproto
_xtables_set_nfproto.restype = None
_xtables_set_nfproto.argtypes = [ct.c_uint8]
_xtables_xt_params = ct.c_void_p.in_dll(_lib_xtables, "xt_params")
_xtables_matches = (ct.c_void_p.in_dll(_lib_xtables, "xtables_matches"))
try:
_xtables_pending_matches = (ct.c_void_p.in_dll(_lib_xtables,
"xtables_pending_matches"))
except ValueError:
_xtables_pending_matches = ct.POINTER(None)
_xtables_targets = (ct.c_void_p.in_dll(_lib_xtables, "xtables_targets"))
try:
_xtables_pending_targets = (ct.c_void_p.in_dll(_lib_xtables,
"xtables_pending_targets"))
except ValueError:
_xtables_pending_targets = ct.POINTER(None)
_cache = weakref.WeakValueDictionary()
def __new__(cls, proto):
obj = xtables._cache.get(proto, None)
if not obj:
obj = object.__new__(cls)
xtables._cache[proto] = obj
obj._xtinit(proto)
return obj
def _xtinit(self, proto):
self.proto = proto
self._xt_globals = xtables_globals()
self._xt_globals.option_offset = 0
self._xt_globals.program_name = version.__pkgname__
self._xt_globals.program_version = version.__version__
self._xt_globals.orig_opts = None
self._xt_globals.opts = None
self._xt_globals.exit_err = _xt_exit
thismodule = sys.modules[__name__]
matchname = "_xtables_match_v%d" % (_xtables_version)
targetname = "_xtables_target_v%d" % (_xtables_version)
try:
self._match_struct = getattr(thismodule, matchname)
self._target_struct = getattr(thismodule, targetname)
except:
raise XTablesError("unknown xtables version %d" %
(_xtables_version))
self._loaded_exts = []
# make sure we're initializing with clean state
self._xt_params = ct.c_void_p(None).value
self._matches = ct.c_void_p(None).value
self._pending_matches = ct.c_void_p(None).value
self._targets = ct.c_void_p(None).value
self._pending_targets = ct.c_void_p(None).value
rv = xtables._xtables_init_all(ct.pointer(self._xt_globals), proto)
if rv:
raise XTablesError("xtables_init_all() failed: %d" % (rv))
self._save_globals()
def __repr__(self):
return "XTables for protocol %d" % (self.proto)
def _save_globals(self):
# Save our per-protocol libxtables global variables, and set them to
# NULL so that we don't interfere with other protocols.
null = ct.c_void_p(None)
self._xt_params = xtables._xtables_xt_params.value
xtables._xtables_xt_params.value = null.value
self._matches = xtables._xtables_matches.value
xtables._xtables_matches.value = null.value
self._pending_matches = xtables._xtables_pending_matches.value
xtables._xtables_pending_matches.value = null.value
self._targets = xtables._xtables_targets.value
xtables._xtables_targets.value = null.value
self._pending_targets = xtables._xtables_pending_targets.value
xtables._xtables_pending_targets.value = null.value
def _restore_globals(self):
# Restore per-protocol libxtables global variables saved in
# _save_globals().
xtables._xtables_set_nfproto(self.proto)
xtables._xtables_xt_params.value = self._xt_params
xtables._xtables_matches.value = self._matches
xtables._xtables_pending_matches.value = self._pending_matches
xtables._xtables_targets.value = self._targets
xtables._xtables_pending_targets.value = self._pending_targets
def _check_extname(self, name):
if name in ["", "ACCEPT", "DROP", "QUEUE", "RETURN"]:
name = "standard"
return name
def _loaded(self, name):
self._loaded_exts.append(name)
def _is_loaded(self, name):
if name in self._loaded_exts:
return True
else:
return False
def _get_initfn_from_lib(self, name, lib):
try:
initfn = getattr(lib, "libxt_%s_init" % (name))
except AttributeError:
prefix = self._get_prefix()
initfn = getattr(lib, "%s%s_init" % (prefix, name), None)
return initfn
def _try_extinit(self, name, lib):
try:
if type(lib) != ct.CDLL:
lib = ct.CDLL(lib)
fn = self._get_initfn_from_lib(name, lib)
if fn:
_wrap_voidfn(fn)
return True
except:
pass
return False
def _get_prefix(self):
if self.proto == NFPROTO_IPV4:
return "libipt_"
elif self.proto == NFPROTO_IPV6:
return "libip6t_"
else:
raise XTablesError("Unknown protocol %d" % (self.proto))
def _try_register(self, name):
if self._try_extinit(name, _lib_xtables):
return
prefix = self._get_prefix()
libs = [os.path.join(_xtables_libdir, "libxt_" + name + ".so"),
os.path.join(_xtables_libdir, prefix + name + ".so")]
for lib in libs:
if self._try_extinit(name, lib):
return
@preserve_globals
def find_match(self, name):
name = self._check_extname(name)
match = xtables._xtables_find_match(name, XTF_TRY_LOAD, None)
if not match:
self._try_register(name)
match = xtables._xtables_find_match(name, XTF_TRY_LOAD, None)
if not match:
return match
self._loaded(name)
return ct.cast(match, ct.POINTER(self._match_struct))
@preserve_globals
def find_target(self, name):
name = self._check_extname(name)
target = xtables._xtables_find_target(name, XTF_TRY_LOAD)
if not target:
self._try_register(name)
target = xtables._xtables_find_target(name, XTF_TRY_LOAD)
if not target:
return target
self._loaded(name)
return ct.cast(target, ct.POINTER(self._target_struct))
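# Illustrative sketch (not part of the original module): a caller could look up
# extensions roughly like
#     xt = xtables(NFPROTO_IPV4)
#     tcp_match = xt.find_match("tcp")
#     drop_target = xt.find_target("DROP")
# Both methods return a NULL pointer when the extension cannot be found or its
# shared object cannot be loaded.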
@preserve_globals
def save(self, module, ip, ptr):
_wrap_save(module.save, ct.cast(ct.pointer(ip), ct.c_void_p), ptr)
def _option_lookup(self, entries, name):
for e in entries:
if not e.name:
break
if e.name == name:
return e
return None
def _parse(self, module, argv, inv, flags, entry, ptr):
for opt in module.extra_opts:
if opt.name == argv[0]:
rv = _wrap_parse(module.parse, opt.val, argv, inv, flags,
entry, ptr)
if rv != 1:
raise ValueError("invalid value %s" % (argv[1]))
return
elif not opt.name:
break
raise AttributeError("invalid parameter %s" % (argv[0]))
# Dispatch arguments to the appropriate parse function, based upon the
# extension's choice of API.
@preserve_globals
def parse_target(self, argv, invert, t, fw, ptr):
_optarg.value = argv[1]
_optind.value = 2
x6_options = None
x6_parse = None
try:
# new API?
x6_options = t.x6_options
x6_parse = t.x6_parse
except AttributeError:
pass
if x6_options and x6_parse:
# new API
entry = self._option_lookup(t.x6_options, argv[0])
if not entry:
raise XTablesError("%s: no such parameter %s" % (t.name,
argv[0]))
cb = xt_option_call()
cb.entry = ct.pointer(entry)
cb.arg = _optarg
cb.invert = ct.c_uint8(invert.value)
cb.ext_name = t.name
cb.data = ct.cast(t.t[0].data, ct.c_void_p)
cb.xflags = 0
cb.target = ct.pointer(t.t)
cb.xt_entry = ct.cast(fw, ct.c_void_p)
cb.udata = t.udata
rv = _wrap_x6fn(t.x6_parse, ct.pointer(cb))
if rv != 0:
raise XTablesError("%s: parameter error %d (%s)" % (t.name, rv,
argv[1]))
t.tflags |= cb.xflags
return
# old API
flags = ct.pointer(ct.c_uint(0))
self._parse(t, argv, invert, flags, fw, ptr)
t.tflags |= flags[0]
# Dispatch arguments to the appropriate parse function, based upon the
# extension's choice of API.
@preserve_globals
def parse_match(self, argv, invert, m, fw, ptr):
_optarg.value = argv[1]
_optind.value = 2
x6_options = None
x6_parse = None
try:
# new API?
x6_options = m.x6_options
x6_parse = m.x6_parse
except AttributeError:
pass
if x6_options and x6_parse:
# new API
entry = self._option_lookup(m.x6_options, argv[0])
if not entry:
raise XTablesError("%s: no such parameter %s" % (m.name,
argv[0]))
cb = xt_option_call()
cb.entry = ct.pointer(entry)
cb.arg = _optarg
cb.invert = ct.c_uint8(invert.value)
cb.ext_name = m.name
cb.data = ct.cast(m.m[0].data, ct.c_void_p)
cb.xflags = 0
cb.match = ct.pointer(m.m)
cb.xt_entry = ct.cast(fw, ct.c_void_p)
cb.udata = m.udata
rv = _wrap_x6fn(m.x6_parse, ct.pointer(cb))
if rv != 0:
raise XTablesError("%s: parameter error %d (%s)" % (m.name, rv,
argv[1]))
m.mflags |= cb.xflags
return
# old API
flags = ct.pointer(ct.c_uint(0))
self._parse(m, argv, invert, flags, fw, ptr)
m.mflags |= flags[0]
| gpl-3.0 |
Clyde-fare/cclib | src/cclib/method/cspa.py | 1 | 3974 | # This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""C-squared population analysis."""
import random
import numpy
from .population import Population
class CSPA(Population):
"""The C-squared population analysis."""
def __init__(self, *args):
# Call the __init__ method of the superclass.
super(CSPA, self).__init__(logname="CSPA", *args)
def __str__(self):
"""Return a string representation of the object."""
return "CSPA of" % (self.data)
def __repr__(self):
"""Return a representation of the object."""
return 'CSPA("%s")' % (self.data)
def calculate(self, indices=None, fupdate=0.05):
"""Perform the C squared population analysis.
Inputs:
indices - list of lists containing atomic orbital indices of fragments
"""
# Do we have the needed info in the parser?
if not hasattr(self.data, "mocoeffs"):
self.logger.error("Missing mocoeffs")
return False
if not hasattr(self.data, "nbasis"):
self.logger.error("Missing nbasis")
return False
if not hasattr(self.data, "homos"):
self.logger.error("Missing homos")
return False
self.logger.info("Creating attribute aoresults: array[3]")
# Determine number of steps, and whether process involves beta orbitals.
unrestricted = (len(self.data.mocoeffs)==2)
nbasis = self.data.nbasis
self.aoresults = []
alpha = len(self.data.mocoeffs[0])
self.aoresults.append(numpy.zeros([alpha, nbasis], "d"))
nstep = alpha
if unrestricted:
beta = len(self.data.mocoeffs[1])
self.aoresults.append(numpy.zeros([beta, nbasis], "d"))
nstep += beta
# Initialize progress if available.
if self.progress:
self.progress.initialize(nstep)
step = 0
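# C-squared weights: the contribution of basis function a to MO i is taken as
# c_ai**2 / sum_b c_bi**2, i.e. squared MO coefficients normalised per orbital.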
for spin in range(len(self.data.mocoeffs)):
for i in range(len(self.data.mocoeffs[spin])):
if self.progress and random.random() < fupdate:
self.progress.update(step, "C^2 Population Analysis")
submocoeffs = self.data.mocoeffs[spin][i]
scale = numpy.inner(submocoeffs, submocoeffs)
tempcoeffs = numpy.multiply(submocoeffs, submocoeffs)
self.aoresults[spin][i] = numpy.divide(tempcoeffs, scale).astype("d")
step += 1
if self.progress:
self.progress.update(nstep, "Done")
retval = super(CSPA, self).partition(indices)
if not retval:
self.logger.error("Error in partitioning results")
return False
self.logger.info("Creating fragcharges: array[1]")
size = len(self.fragresults[0][0])
self.fragcharges = numpy.zeros([size], "d")
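# Accumulate fragment populations over the occupied orbitals only (indices
# 0..homo); for restricted wavefunctions each orbital is doubly occupied,
# hence the final factor of 2.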
for spin in range(len(self.fragresults)):
for i in range(self.data.homos[spin] + 1):
temp = numpy.reshape(self.fragresults[spin][i], (size,))
self.fragcharges = numpy.add(self.fragcharges, temp)
if not unrestricted:
self.fragcharges = numpy.multiply(self.fragcharges, 2)
return True
if __name__ == "__main__":
import doctest, cspa
doctest.testmod(cspa, verbose=False)
| lgpl-2.1 |
corso-python-prato/share-system-team2 | server/test_server.py | 1 | 62918 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
server test module
Every TestCase class should use the <TEST_DIR> directory. To do it, just call 'setup_test_dir()' in the setUp method and
'tear_down_test_dir()' in the tearDown one.
"""
import unittest
import os
import base64
import shutil
import urlparse
import json
import logging
import hashlib
import tempfile
import random
import string
import mock
import server
from server import userpath2serverpath
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_ACCEPTED = 202
HTTP_BAD_REQUEST = 400
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_CONFLICT = 409
start_dir = os.getcwd()
TEST_DIR = 'server_test'
SERVER_API = '/API/V1/'
SERVER_FILES_API = urlparse.urljoin(SERVER_API, 'files/')
SERVER_ACTIONS_API = urlparse.urljoin(SERVER_API, 'actions/')
SERVER_SHARES_API = urlparse.urljoin(SERVER_API, 'shares/')
# Set server logging verbosity
server_verbosity = logging.WARNING # change it manually if you want change the server verbosity
server.logger.setLevel(server_verbosity)
# Very basic logging configuration for this test module:
logging.basicConfig(level=logging.WARNING)
# Test-user account details
REGISTERED_TEST_USER = '[email protected]', 'Mail_85'
USR, PW = REGISTERED_TEST_USER
SHAREUSR = 'pyboxshareuser'
SHAREUSRPW = '12345'
def pick_rand_str(length, possible_chars=string.ascii_lowercase):
return ''.join([random.choice(possible_chars) for _ in xrange(length)])
def pick_rand_email():
res = '{}@{}.{}'.format(pick_rand_str(random.randrange(3, 12)),
pick_rand_str(random.randrange(3, 8)),
pick_rand_str(random.randrange(2, 4)))
return res
# \b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b
def make_basicauth_headers(user, pwd):
return {'Authorization': 'Basic ' + base64.b64encode('{}:{}'.format(user, pwd))}
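# For example, make_basicauth_headers('user', 'pw') returns
# {'Authorization': 'Basic dXNlcjpwdw=='}.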
def _create_file(username, user_relpath, content, update_userdata=True):
"""
Create a user file with path <user_relpath> and content <content>
and return its last modification time (== creation time).
:param username: str
:param user_relpath: str
:param content: str
:return: float
"""
filepath = userpath2serverpath(username, user_relpath)
dirpath = os.path.dirname(filepath)
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
with open(filepath, 'wb') as fp:
fp.write(content)
mtime = server.now_timestamp()
if update_userdata:
server.userdata[username][server.SNAPSHOT][user_relpath] = [mtime,
server.calculate_file_md5(open(filepath, 'rb'))]
return mtime
def create_user_dir(username):
"""
Create user directory (must not exist)
:param username:
:return:
"""
os.makedirs(userpath2serverpath(username))
def build_tstuser_dir(username):
"""
Create a directory with files and return its expected server timestamp
and snapshot.
:param username: str
:return: tuple
"""
# md5("foo") = "acbd18db4cc2f85cedef654fccc4a4d8"
# md5("bar") = "37b51d194a7513e45b56f6524f2d51f2"
# md5("spam") = "e09f6a7593f8ae3994ea57e1117f67ec"
file_contents = [
('spamfile', 'spam', 'e09f6a7593f8ae3994ea57e1117f67ec'),
(os.path.join('subdir', 'foofile.txt'), 'foo', 'acbd18db4cc2f85cedef654fccc4a4d8'),
(os.path.join('subdir', 'barfile.md'), 'bar', '37b51d194a7513e45b56f6524f2d51f2'),
]
user_root = userpath2serverpath(username)
# If directory already exists, destroy it
if os.path.isdir(user_root):
shutil.rmtree(user_root)
os.mkdir(user_root)
expected_timestamp = None
expected_snapshot = {}
for user_filepath, content, md5 in file_contents:
expected_timestamp = int(_create_file(username, user_filepath, content))
expected_snapshot[user_filepath] = [expected_timestamp, unicode(md5)]
return expected_timestamp, expected_snapshot
def _manually_create_user(username, pw):
"""
Create an *active* user, its server directory, and return its userdata dictionary.
:param username: str
:param pw: str
:return: dict
"""
enc_pass = server._encrypt_password(pw)
# Create user directory with default structure (use the server function)
user_dir_state = server.init_user_directory(username)
single_user_data = user_dir_state
single_user_data[server.USER_IS_ACTIVE] = True
single_user_data[server.PWD] = enc_pass
single_user_data[server.USER_CREATION_TIME] = server.now_timestamp()
single_user_data['shared_with_me'] = {}
single_user_data['shared_with_others'] = {}
single_user_data['shared_files'] = {}
server.userdata[username] = single_user_data
return single_user_data
def _manually_remove_user(username): # TODO: make this from server module?
"""
Remove the user dictionary from server <userdata>, if present,
and remove its directory from disk, if it exists.
:param username: str
"""
if USR in server.userdata:
server.userdata.pop(username)
# Remove user directory if exists!
user_dirpath = userpath2serverpath(USR)
if os.path.exists(user_dirpath):
shutil.rmtree(user_dirpath)
logging.debug('"%s" user directory removed' % user_dirpath)
def setup_test_dir():
"""
Create (if needed) <TEST_DIR> directory starting from current directory and change current directory to the new one.
"""
try:
os.mkdir(TEST_DIR)
except OSError:
pass
os.chdir(TEST_DIR)
def tear_down_test_dir():
"""
Return to initial directory and remove the <TEST_DIR> one.
"""
os.chdir(start_dir)
shutil.rmtree(TEST_DIR)
def _make_temp_file():
"""
Create temporary file for testing
NB: the file sent with test_client() must have a name (hence NamedTemporaryFile)
:return: a tuple with the file object and the corresponding md5 hex digest
"""
temp_file = tempfile.NamedTemporaryFile()
temp_file.write('this is a test')
temp_file.seek(0)
test_md5 = hashlib.md5('this is a test').hexdigest()
return temp_file, test_md5
@unittest.skipUnless(hasattr(server, 'configure_email'),
'This unit test is based on "server.configure_email" function which is missing. \
It could be due to a refactoring, so this test should be updated or removed.')
class TestServerConfigureEmail(unittest.TestCase):
def test_no_exception(self):
# Control: must not raise exceptions
server.configure_email()
def test_missing_email_settings_file(self):
"""
Missing emailSettings.ini must raise a ServerConfigurationError,
when calling server.configure_email.
"""
with mock.patch('os.path.exists') as mock_exists:
mock_exists.return_value = False
self.assertRaises(
server.ServerConfigurationError,
server.configure_email,
)
# Control: ensure is called only once, since we mocked every os.path.exists calls
mock_exists.assert_called_once_with(server.EMAIL_SETTINGS_FILEPATH)
class TestRequests(unittest.TestCase):
def setUp(self):
"""
Create a user and create the test file to test the download from the server.
"""
setup_test_dir()
self.app = server.app.test_client()
self.app.testing = True
_manually_remove_user(USR)
_manually_create_user(USR, PW)
def tearDown(self):
_manually_remove_user(USR)
tear_down_test_dir()
def test_update_passwordmeter_terms(self):
import passwordmeter
terms_file = tempfile.NamedTemporaryFile()
terms = ['dsgdfgsfgsr\n',
'sdfdffdgdgfs\n',
'sfsdgdhgdsdfgdg\n',
'dsffdgdfgdfgdf\n'
]
for term in terms:
terms_file.write(term)
# We have to give filename to the function update_passwordmeter_terms
name_of_file = terms_file.name
terms_file.seek(0)
server.update_passwordmeter_terms(name_of_file)
for term in terms:
self.assertIn(term, passwordmeter.common10k)
def test_files_post_with_auth(self):
"""
Test for authenticated upload.
"""
user_relative_upload_filepath = 'testupload/testfile.txt'
upload_test_url = SERVER_FILES_API + user_relative_upload_filepath
uploaded_filepath = userpath2serverpath(USR, user_relative_upload_filepath)
assert not os.path.exists(uploaded_filepath), '"{}" file is existing'.format(uploaded_filepath)
# Create temporary file for test
test_file, test_md5 = _make_temp_file()
try:
test = self.app.post(upload_test_url,
headers=make_basicauth_headers(USR, PW),
data={'file': test_file, 'md5': test_md5},
follow_redirects=True)
finally:
test_file.close()
self.assertEqual(test.status_code, server.HTTP_CREATED)
self.assertTrue(os.path.isfile(uploaded_filepath))
# check that uploaded path exists in username files dict
self.assertIn(user_relative_upload_filepath, server.userdata[USR][server.SNAPSHOT])
os.remove(uploaded_filepath)
logging.info('"{}" removed'.format(uploaded_filepath))
def test_files_post_with_not_allowed_path(self):
"""
Test that creating a directory above the user root is not allowed.
"""
user_filepath = '../../../test/myfile2.dat' # path forbidden
url = SERVER_FILES_API + user_filepath
# Create temporary file for test
test_file, test_md5 = _make_temp_file()
try:
test = self.app.post(url,
headers=make_basicauth_headers(USR, PW),
data={'file': test_file, 'md5': test_md5},
follow_redirects=True)
finally:
test_file.close()
self.assertEqual(test.status_code, server.HTTP_FORBIDDEN)
self.assertFalse(os.path.isfile(userpath2serverpath(USR, user_filepath)))
# check that uploaded path NOT exists in username files dict
self.assertNotIn(user_filepath, server.userdata[USR][server.SNAPSHOT])
def test_files_post_with_existent_path(self):
"""
Test the creation of a file that already exists.
"""
path = 'test_put/file_to_change.txt' # path already existent
_create_file(USR, path, 'I already exist! Don\'t erase me!')
to_created_filepath = userpath2serverpath(USR, path)
old_content = open(to_created_filepath).read()
old_md5 = server.userdata[USR][server.SNAPSHOT][path][1]
url = SERVER_FILES_API + path
# Create temporary file for test
test_file, test_md5 = _make_temp_file()
try:
test = self.app.post(url,
headers=make_basicauth_headers(USR, PW),
data={'file': test_file, 'md5': test_md5},
follow_redirects=True)
finally:
test_file.close()
self.assertEqual(test.status_code, server.HTTP_FORBIDDEN)
new_content = open(to_created_filepath).read()
self.assertEqual(old_content, new_content)
new_md5 = server.userdata[USR][server.SNAPSHOT][path][1]
self.assertEqual(old_md5, new_md5)
def test_files_post_with_bad_md5(self):
"""
Test upload with bad md5.
"""
user_relative_upload_filepath = 'testupload/testfile.txt'
upload_test_url = SERVER_FILES_API + user_relative_upload_filepath
uploaded_filepath = userpath2serverpath(USR, user_relative_upload_filepath)
assert not os.path.exists(uploaded_filepath), '"{}" file is existing'.format(uploaded_filepath)
# Create temporary file for test
test_file, not_used_md5 = _make_temp_file()
# Create fake md5 and send it instead the right md5
fake_md5 = 'sent_bad_md5'
try:
test = self.app.post(upload_test_url,
headers=make_basicauth_headers(USR, PW),
data={'file': test_file, 'md5': fake_md5},
follow_redirects=True)
finally:
test_file.close()
self.assertEqual(test.status_code, server.HTTP_CONFLICT)
self.assertFalse(os.path.isfile(userpath2serverpath(USR, user_relative_upload_filepath)))
# check that uploaded path NOT exists in username files dict
self.assertNotIn(user_relative_upload_filepath, server.userdata[USR][server.SNAPSHOT])
def test_files_put_with_auth(self):
"""
Test put with a wrong md5: file content and stored md5 must not change.
"""
path = 'test_put/file_to_change.txt'
_create_file(USR, path, 'I will change')
to_modify_filepath = userpath2serverpath(USR, path)
old_content = open(to_modify_filepath).read()
old_md5 = server.userdata[USR][server.SNAPSHOT][path][1]
url = SERVER_FILES_API + path
# Create temporary file for test
test_file, not_used_md5 = _make_temp_file()
# Create fake md5 and send it instead the right md5
fake_md5 = 'sent_bad_md5'
try:
test = self.app.put(url,
headers=make_basicauth_headers(USR, PW),
data={'file': test_file, 'md5': fake_md5},
follow_redirects=True)
finally:
test_file.close()
new_content = open(to_modify_filepath).read()
self.assertEqual(old_content, new_content)
new_md5 = server.userdata[USR][server.SNAPSHOT][path][1]
self.assertEqual(old_md5, new_md5)
self.assertEqual(test.status_code, server.HTTP_CONFLICT)
def test_files_put_of_not_existing_file(self):
"""
Test modifying a file that does not exist.
"""
path = 'test_put/file_not_existent.txt' # not existent path
to_modify_filepath = userpath2serverpath(USR, path)
url = SERVER_FILES_API + path
# Create temporary file for test
test_file, test_md5 = _make_temp_file()
try:
test = self.app.put(url,
headers=make_basicauth_headers(USR, PW),
data={'file': test_file, 'md5': test_md5},
follow_redirects=True)
finally:
test_file.close()
self.assertEqual(test.status_code, server.HTTP_NOT_FOUND)
self.assertNotIn(to_modify_filepath, server.userdata[USR][server.SNAPSHOT])
def test_files_put_with_bad_md5(self):
"""
Test modifying a file with the correct md5: content and stored md5 must change.
"""
path = 'test_put/file_to_change.txt'
_create_file(USR, path, 'I will NOT change')
to_modify_filepath = userpath2serverpath(USR, path)
old_content = open(to_modify_filepath).read()
old_md5 = server.userdata[USR][server.SNAPSHOT][path][1]
url = SERVER_FILES_API + path
# Create temporary file for test
test_file, test_md5 = _make_temp_file()
try:
test = self.app.put(url,
headers=make_basicauth_headers(USR, PW),
data={'file': test_file, 'md5': test_md5},
follow_redirects=True)
finally:
test_file.close()
new_content = open(to_modify_filepath).read()
self.assertNotEqual(old_content, new_content)
new_md5 = server.userdata[USR][server.SNAPSHOT][path][1]
self.assertNotEqual(old_md5, new_md5)
self.assertEqual(test.status_code, server.HTTP_CREATED) # 200 or 201 (OK or created)?
def test_delete_file_path(self):
"""
Test that a created file is deleted and assert that it no longer exists.
"""
# create file to be deleted
delete_test_url = SERVER_ACTIONS_API + 'delete'
delete_test_file_path = 'testdelete/testdeletefile.txt'
to_delete_filepath = userpath2serverpath(USR, delete_test_file_path)
_create_file(USR, delete_test_file_path, 'this is the file to be deleted')
test = self.app.post(delete_test_url,
headers=make_basicauth_headers(USR, PW),
data={'filepath': delete_test_file_path}, follow_redirects=True)
self.assertEqual(test.status_code, server.HTTP_OK)
self.assertFalse(os.path.isfile(to_delete_filepath))
self.assertNotIn(delete_test_file_path, server.userdata[USR][server.SNAPSHOT])
def test_delete_file_path_with_tricky_filepath(self):
"""
Test the delete action with a path that can fall in other user directories or above them.
"""
delete_test_url = SERVER_ACTIONS_API + 'delete'
tricky_to_delete_test_filepath = 'testdelete/../../testdeletefile.txt'
test = self.app.post(delete_test_url,
headers=make_basicauth_headers(USR, PW),
data={'filepath': tricky_to_delete_test_filepath}, follow_redirects=True)
self.assertEqual(test.status_code, server.HTTP_FORBIDDEN)
def test_delete_file_path_with_unexisting_filepath(self):
"""
Test if delete action returns HTTP_NOT_FOUND when trying to remove an unexisting file.
"""
delete_test_url = SERVER_ACTIONS_API + 'delete'
wrong_to_delete_test_filepath = 'testdelete/unexistingfile.dat'
test = self.app.post(delete_test_url,
headers=make_basicauth_headers(USR, PW),
data={'filepath': wrong_to_delete_test_filepath}, follow_redirects=True)
self.assertEqual(test.status_code, HTTP_NOT_FOUND)
def test_copy_file_path(self):
"""
Test that a source file is copied to a newly created destination and that the source file
still exists afterwards.
"""
copy_test_url = SERVER_ACTIONS_API + 'copy'
src_copy_test_file_path = 'test_copy_src/testcopysrc.txt'
dst_copy_test_file_path = 'test_copy_dst/testcopydst.txt'
# Create source file to be copied and its destination.
src_copy_filepath = userpath2serverpath(USR, src_copy_test_file_path)
_create_file(USR, src_copy_test_file_path, 'this is the file to be copied')
_create_file(USR, dst_copy_test_file_path, 'different other content')
test = self.app.post(copy_test_url,
headers=make_basicauth_headers(USR, PW),
data={'src': src_copy_test_file_path, 'dst': dst_copy_test_file_path},
follow_redirects=True)
self.assertEqual(test.status_code, server.HTTP_OK)
self.assertTrue(os.path.isfile(src_copy_filepath))
def test_copy_file_path_with_tricky_filepaths(self):
"""
Test the copy action with source and destination paths that can fall in other user directories or above them.
"""
copy_test_url = SERVER_ACTIONS_API + 'copy'
tricky_src_copy_test_file_path = 'test_copy_src/../../testcopysrc.txt'
tricky_dst_copy_test_file_path = 'test_copy_dst/../../testcopydst.txt'
test = self.app.post(copy_test_url,
headers=make_basicauth_headers(USR, PW),
data={'src': tricky_src_copy_test_file_path, 'dst': tricky_dst_copy_test_file_path},
follow_redirects=True)
self.assertEqual(test.status_code, server.HTTP_FORBIDDEN)
def test_copy_file_path_with_unexisting_destinationfile(self):
"""
Test the creation of a destination file that doesn't exist beforehand.
"""
copy_test_url = SERVER_ACTIONS_API + 'copy'
src_copy_test_file_path = 'test_copy_src/testcopysrc.txt'
dst_copy_test_file_path = 'test_copy_dst/testcopydst.txt'
# Create source file to be copied and its destination.
src_copy_filepath = userpath2serverpath(USR, src_copy_test_file_path)
_create_file(USR, src_copy_test_file_path, 'this is the file to be copied')
test = self.app.post(copy_test_url,
headers=make_basicauth_headers(USR, PW),
data={'src': src_copy_test_file_path, 'dst': dst_copy_test_file_path},
follow_redirects=True)
self.assertEqual(test.status_code, server.HTTP_OK)
def test_copy_file_path_with_unexisting_source(self):
"""
Test if copy action returns HTTP_NOT_FOUND when trying to copy from an unexisting source file.
"""
copy_test_url = SERVER_ACTIONS_API + 'copy'
unexisting_src_copy_test_file_path = 'test_copy_src/unexistingcopysrc.txt'
dst_copy_test_file_path = 'test_copy_dst/testcopydst.txt'
test = self.app.post(copy_test_url,
headers=make_basicauth_headers(USR, PW),
data={'src': unexisting_src_copy_test_file_path, 'dst': dst_copy_test_file_path},
follow_redirects=True)
self.assertEqual(test.status_code, HTTP_NOT_FOUND)
def test_move_file_path(self):
"""
Test that a source file is moved to a newly created destination and that the source file
no longer exists afterwards.
"""
move_test_url = SERVER_ACTIONS_API + 'move'
src_move_test_file_path = 'test_move_src/testmovesrc.txt'
dst_move_test_file_path = 'test_move_dst/testmovedst.txt'
# create source file to be moved and its destination
src_move_filepath = userpath2serverpath(USR, src_move_test_file_path)
_create_file(USR, src_move_test_file_path, 'this is the file to be moved')
test = self.app.post(move_test_url,
headers=make_basicauth_headers(USR, PW),
data={'src': src_move_test_file_path, 'dst': dst_move_test_file_path},
follow_redirects=True)
self.assertEqual(test.status_code, server.HTTP_OK)
self.assertFalse(os.path.isfile(src_move_filepath))
def test_move_file_path_with_wrong_cmd(self):
"""
Test that an unknown command (anything other than delete, copy or move) raises KeyError on the server and is answered with a 404 abort.
"""
move_test_url = SERVER_ACTIONS_API + 'wrong_cmd'
src_move_test_file_path = 'test_move_src/testmovesrc.txt'
dst_move_test_file_path = 'test_move_dst/testmovedst.txt'
# create source file to be moved and its destination
_create_file(USR, src_move_test_file_path, 'this is the file to be moved')
test = self.app.post(move_test_url,
headers=make_basicauth_headers(USR, PW),
data={'src': src_move_test_file_path, 'dst': dst_move_test_file_path},
follow_redirects=True)
self.assertEqual(test.status_code, server.HTTP_NOT_FOUND)
def test_move_file_path_with_tricky_filepaths(self):
"""
Test the move action with source and destination paths that can fall in other user directories or above them.
"""
move_test_url = SERVER_ACTIONS_API + 'move'
tricky_src_move_test_file_path = 'test_move_src/../../testmovesrc.txt'
tricky_dst_move_test_file_path = 'test_move_dst/../../testmovedst.txt'
test = self.app.post(move_test_url,
headers=make_basicauth_headers(USR, PW),
data={'src': tricky_src_move_test_file_path, 'dst': tricky_dst_move_test_file_path},
follow_redirects=True)
self.assertEqual(test.status_code, server.HTTP_FORBIDDEN)
def test_move_file_path_with_unexisting_source(self):
"""
Test if move action returns HTTP_NOT_FOUND when trying to move from an unexisting source file.
"""
move_test_url = SERVER_ACTIONS_API + 'move'
unexisting_src_move_test_file_path = 'test_move_src/unexistingmovesrc.txt'
dst_move_test_file_path = 'test_move_dst/testmovedst.txt'
test = self.app.post(move_test_url,
headers=make_basicauth_headers(USR, PW),
data={'src': unexisting_src_move_test_file_path, 'dst': dst_move_test_file_path},
follow_redirects=True)
self.assertEqual(test.status_code, HTTP_NOT_FOUND)
class TestGetRequests(unittest.TestCase):
"""
Test get requests.
"""
USER_RELATIVE_DOWNLOAD_FILEPATH = 'testdownload/testfile.txt'
DOWNLOAD_TEST_URL = SERVER_FILES_API + USER_RELATIVE_DOWNLOAD_FILEPATH
def setUp(self):
"""
Create a user and create the test file to test the download from the server.
"""
setup_test_dir()
self.app = server.app.test_client()
self.app.testing = True
_manually_remove_user(USR)
_manually_create_user(USR, PW)
_create_file(USR, self.USER_RELATIVE_DOWNLOAD_FILEPATH, 'some text')
def tearDown(self):
server_filepath = userpath2serverpath(USR, self.USER_RELATIVE_DOWNLOAD_FILEPATH)
if os.path.exists(server_filepath):
os.remove(server_filepath)
_manually_remove_user(USR)
tear_down_test_dir()
def test_files_get_with_auth(self):
"""
Test that the server returns an OK HTTP code if an authenticated user requests
to download an existing file.
"""
test = self.app.get(self.DOWNLOAD_TEST_URL,
headers=make_basicauth_headers(USR, PW))
self.assertEqual(test.status_code, server.HTTP_OK)
def test_files_get_existing_file_with_wrong_password(self):
"""
Test that the server returns an HTTP_UNAUTHORIZED error if
the user exists but the given password is wrong.
"""
wrong_password = PW + 'a'
test = self.app.get(self.DOWNLOAD_TEST_URL,
headers=make_basicauth_headers(USR, wrong_password))
self.assertEqual(test.status_code, server.HTTP_UNAUTHORIZED)
def test_files_get_existing_file_with_empty_password(self):
"""
Test that the server returns an HTTP_UNAUTHORIZED error if
the user exists but the password is an empty string.
"""
test = self.app.get(self.DOWNLOAD_TEST_URL,
headers=make_basicauth_headers(USR, ''))
self.assertEqual(test.status_code, server.HTTP_UNAUTHORIZED)
def test_files_get_existing_file_with_empty_username(self):
"""
Test that the server returns an HTTP_UNAUTHORIZED error if
the given user is an empty string and the password is not empty.
"""
test = self.app.get(self.DOWNLOAD_TEST_URL,
headers=make_basicauth_headers('', PW))
self.assertEqual(test.status_code, server.HTTP_UNAUTHORIZED)
def test_files_get_existing_file_with_unexisting_user(self):
"""
Test that the server returns an HTTP_UNAUTHORIZED error if
the given user does not exist.
"""
user = 'UnExIsTiNgUsEr'
assert user not in server.userdata
test = self.app.get(self.DOWNLOAD_TEST_URL,
headers=make_basicauth_headers(user, PW))
self.assertEqual(test.status_code, server.HTTP_UNAUTHORIZED)
def test_files_get_without_auth(self):
"""
Test unauthorized download of an existing file.
"""
# TODO: ensure that the file exists
test = self.app.get(self.DOWNLOAD_TEST_URL)
self.assertEqual(test.status_code, server.HTTP_UNAUTHORIZED)
def test_files_get_with_not_existing_file(self):
"""
Test that error 404 is correctly returned if an authenticated user tries to download
a file that does not exist.
"""
test = self.app.get(SERVER_FILES_API + 'testdownload/unexisting.txt',
headers=make_basicauth_headers(USR, PW))
self.assertEqual(test.status_code, server.HTTP_NOT_FOUND)
def test_files_get_with_not_existing_directory(self):
"""
Test that error 404 is correctly returned if an authenticated user tries to download
from an unexisting directory.
"""
test = self.app.get(SERVER_FILES_API + 'unexisting/unexisting.txt',
headers=make_basicauth_headers(USR, PW))
self.assertEqual(test.status_code, server.HTTP_NOT_FOUND)
def test_files_get_with_tricky_file(self):
"""
Test that error 403 is correctly returned if an authenticated user tries to download
a file whose path can fall in other user directories or above them.
"""
test = self.app.get(SERVER_FILES_API + 'testdownload/../../testfile.txt',
headers=make_basicauth_headers(USR, PW))
self.assertEqual(test.status_code, server.HTTP_FORBIDDEN)
def test_files_get_snapshot(self):
"""
Test server-side user files snapshot.
"""
# The test user is created in setUp
expected_timestamp = server.userdata[USR]['server_timestamp']
expected_snapshot = server.userdata[USR]['files']
expected_shared_files = server.userdata[USR]['shared_files']
target = {server.LAST_SERVER_TIMESTAMP: expected_timestamp,
server.SNAPSHOT: expected_snapshot,
server.SHARED_FILES: expected_shared_files}
test = self.app.get(SERVER_FILES_API,
headers=make_basicauth_headers(USR, PW))
self.assertEqual(test.status_code, server.HTTP_OK)
obj = json.loads(test.data)
self.assertEqual(obj, target)
class TestUsersPost(unittest.TestCase):
def setUp(self):
setup_test_dir()
server.reset_userdata()
self.app = server.app.test_client()
self.app.testing = True
self.username = USR
self.password = PW
self.user_dirpath = userpath2serverpath(self.username)
def tearDown(self):
tear_down_test_dir()
def test_post(self):
"""
Post request for new user
"""
new_username = '[email protected]'
new_username_password = '123.Abc'
assert new_username not in server.userdata
test = self.app.post(urlparse.urljoin(SERVER_API, 'users/' + self.username),
data={'password': self.password})
# Test that user is added to userdata and is created
self.assertIn(self.username, server.userdata.keys())
self.assertEqual(test.status_code, HTTP_CREATED)
def test_user_creation_with_invalid_email(self):
"""
Test post request with a username which is not a valid email address
Examples of invalid emails: [email protected], just"not"[email protected], etc.
"""
invalid_email_username = '[email protected]'
test = self.app.post(urlparse.urljoin(SERVER_API, 'users/' + invalid_email_username),
data={'password': self.password})
self.assertEqual(test.status_code, HTTP_BAD_REQUEST)
def test_user_creation_with_weak_password(self):
"""
Test a post request with a weak password and assure that the user was not saved to disk
"""
test = self.app.post(urlparse.urljoin(SERVER_API, 'users/' + self.username), data={'password': 'weak_password'})
self.assertNotIn(self.username, server.userdata.keys())
self.assertEqual(test.status_code, HTTP_FORBIDDEN)
self.assertIsInstance(json.loads(test.get_data()), dict)
def test_user_already_existing(self):
"""
Existing user --> 409 + no email.
"""
_manually_create_user(self.username, self.password)
with server.mail.record_messages() as outbox:
test = self.app.post(urlparse.urljoin(SERVER_API,
'users/' + self.username),
data={'password': self.password})
# No mail must be sent if this user already exists!
self.assertEqual(len(outbox), 0)
self.assertEqual(test.status_code, HTTP_CONFLICT)
def test_activation_email(self):
"""
Activation mail must be sent to the right recipient and *a line* of its body must be the activation code.
"""
with server.mail.record_messages() as outbox:
self.app.post(urlparse.urljoin(SERVER_API, 'users/' + self.username),
data={'password': self.password})
# Retrieve the generated activation code
activation_code = server.userdata[self.username][server.USER_CREATION_DATA]['activation_code']
self.assertEqual(len(outbox), 1)
body = outbox[0].body
recipients = outbox[0].recipients
self.assertEqual(recipients, [self.username])
self.assertIn(activation_code, body.splitlines())
def test_create_user_without_password(self):
"""
Test the creation of a new user without password.
"""
_manually_create_user(self.username, self.password)
test = self.app.post(urlparse.urljoin(SERVER_API, 'users/' + self.username),
data={'password': ''})
self.assertEqual(test.status_code, HTTP_BAD_REQUEST)
class TestUsersPut(unittest.TestCase):
def setUp(self):
setup_test_dir()
server.reset_userdata()
self.app = server.app.test_client()
self.app.testing = True
self.username = USR
self.password = PW
self.user_dirpath = userpath2serverpath(self.username)
assert self.username not in server.userdata
assert not os.path.exists(self.user_dirpath)
# The Users.post (signup request) is repeatable
resp = self.app.post(urlparse.urljoin(SERVER_API, 'users/' + self.username),
data={'password': self.password})
# Retrieve the generated activation code
self.activation_code = server.userdata[self.username][server.USER_CREATION_DATA]['activation_code']
def tearDown(self):
tear_down_test_dir()
def test_unexisting_username(self):
"""
Not existing username and existing activation_code.
"""
unexisting_user = 'unexisting'
test = self.app.put(urlparse.urljoin(SERVER_API, 'users/' + unexisting_user),
data={'activation_code': self.activation_code})
self.assertEqual(test.status_code, HTTP_NOT_FOUND)
self.assertNotIn(unexisting_user, server.userdata.keys())
self.assertFalse(os.path.exists(userpath2serverpath(unexisting_user)))
def test_wrong_activation_code(self):
"""
Wrong activation code
"""
test = self.app.put(urlparse.urljoin(SERVER_API, 'users/' + self.username),
data={'activation_code': 'fake activation code'})
single_user_data = server.userdata[self.username]
self.assertEqual(test.status_code, HTTP_NOT_FOUND)
self.assertFalse(single_user_data[server.USER_IS_ACTIVE])
self.assertFalse(os.path.exists(self.user_dirpath))
def test_ok(self):
"""
Right activation code --> success.
"""
# Put with correct activation code
test = self.app.put(urlparse.urljoin(SERVER_API, 'users/' + self.username),
data={'activation_code': self.activation_code})
self.assertIn(self.username, server.userdata.keys())
self.assertTrue(os.path.exists(self.user_dirpath))
single_user_data = server.userdata[self.username]
self.assertNotIn(server.USER_CREATION_DATA, single_user_data)
self.assertIn(server.USER_CREATION_TIME, single_user_data)
self.assertTrue(single_user_data[server.USER_IS_ACTIVE])
self.assertEqual(test.status_code, HTTP_OK)
def test__clean_inactive_users(self):
"""
Test the removal of users whose activation time is expired
"""
EXPUSER = 'expireduser'
VALUSER = 'validuser'
EXP_CREATION_TIME = server.now_timestamp() - server.USER_ACTIVATION_TIMEOUT - 1
VALID_CREATION_TIME = server.now_timestamp()
server.userdata[EXPUSER] = {server.USER_IS_ACTIVE: False,
server.USER_CREATION_DATA: {server.USER_CREATION_TIME: EXP_CREATION_TIME}
}
server.userdata[VALUSER] = {server.USER_IS_ACTIVE: False,
server.USER_CREATION_DATA: {server.USER_CREATION_TIME: VALID_CREATION_TIME}
}
server.Users._clean_inactive_users()
self.assertNotIn(EXPUSER, server.userdata)
class TestUsersDelete(unittest.TestCase):
def setUp(self):
setup_test_dir()
server.reset_userdata()
self.app = server.app.test_client()
self.app.testing = True
def tearDown(self):
tear_down_test_dir()
def test_delete_user(self):
"""
User deletion.
"""
# Creating user to delete on-the-fly (TODO: pre-load instead)
_manually_create_user(USR, PW)
user_dirpath = userpath2serverpath(USR)
# Really created?
assert USR in server.userdata, 'User "{}" not found in userdata'.format(USR)
assert os.path.exists(user_dirpath), 'User directory "{}" not found'.format(USR)
# Test FORBIDDEN case (removing other users)
url = SERVER_API + 'users/' + 'otheruser'
test = self.app.delete(url,
headers=make_basicauth_headers(USR, PW))
self.assertEqual(test.status_code, server.HTTP_FORBIDDEN)
# Test OK case
url = SERVER_API + 'users/' + USR
test = self.app.delete(url,
headers=make_basicauth_headers(USR, PW))
self.assertNotIn(USR, server.userdata)
self.assertEqual(test.status_code, server.HTTP_OK)
self.assertFalse(os.path.exists(user_dirpath))
class TestUsersGet(unittest.TestCase):
def setUp(self):
setup_test_dir()
server.reset_userdata()
self.app = server.app.test_client()
self.app.testing = True
def tearDown(self):
tear_down_test_dir()
def test_get_self(self):
username = '[email protected]'
pw = '123.Abc'
_manually_create_user(username, pw)
url = SERVER_API + 'users/' + username
test = self.app.get(url, headers=make_basicauth_headers(username, pw))
self.assertEqual(test.status_code, HTTP_OK)
def test_get_other(self):
username = '[email protected]'
other_username = 'a' + username
pw = '123.Abc'
_manually_create_user(username, pw)
url = SERVER_API + 'users/' + other_username
test = self.app.get(url, headers=make_basicauth_headers(username, pw))
self.assertEqual(test.status_code, HTTP_FORBIDDEN)
class TestUsersRecoverPassword(unittest.TestCase):
def setUp(self):
setup_test_dir()
server.reset_userdata()
self.app = server.app.test_client()
self.app.testing = True
self.active_user = 'Activateduser'
self.active_user_pw = '234.Cde'
_manually_create_user(self.active_user, self.active_user_pw)
self.inactive_username = 'inactiveuser'
self.inactive_username_password = '123.Abc'
self.inactive_username_activationcode = 'randomactivationcode'
server.userdata[self.inactive_username] = {
server.USER_IS_ACTIVE: False,
server.PWD: self.inactive_username_password,
server.USER_CREATION_DATA: {'creation_timestamp': server.now_timestamp(),
'activation_code': self.inactive_username_activationcode,
},
}
def test_active_user(self):
"""
Test recover password request for an already active user
"""
url = SERVER_API + 'users/{}/reset'.format(self.active_user)
test = self.app.post(url)
self.assertEqual(test.status_code, HTTP_ACCEPTED)
self.assertIsNotNone(server.userdata[self.active_user].get('recoverpass_data'))
def test_inactive_user(self):
"""
Test recover password request for inactive user
"""
url = SERVER_API + 'users/{}/reset'.format(self.inactive_username)
previous_activation_data = server.userdata[self.inactive_username][server.USER_CREATION_DATA]
previous_inactive_activation = previous_activation_data['activation_code']
previous_inactive_timestamp = previous_activation_data['creation_timestamp']
test = self.app.post(url)
activation_data = server.userdata[self.inactive_username][server.USER_CREATION_DATA]
self.assertEqual(test.status_code, HTTP_ACCEPTED)
self.assertNotEqual(previous_inactive_activation,
activation_data['activation_code'])
self.assertLess(previous_inactive_timestamp,
activation_data['creation_timestamp'])
def test_unknown_user(self):
"""
Test recover password request for unknown user
"""
url = SERVER_API + 'users/{}/reset'.format('[email protected]')
test = self.app.post(url,
data={'password': 'okokokoko'})
self.assertEqual(test.status_code, HTTP_NOT_FOUND)
def test_put_ok(self):
"""
Test the password recovery with correct PUT parameters.
"""
old_password = server.userdata[self.active_user]['password']
# Now we create an arbitrary recoverpass_code,
# normally created by POST in /users/<username>/reset
recoverpass_code = 'arbitrarycode'
server.userdata[self.active_user]['recoverpass_data'] = {
'recoverpass_code': recoverpass_code,
'timestamp': server.now_timestamp(),
}
# then, put with given code and new password
test = self.app.put(SERVER_API + 'users/{}'.format(self.active_user),
data={'recoverpass_code': recoverpass_code,
'password': self.active_user_pw})
self.assertEqual(test.status_code, HTTP_OK)
self.assertNotEqual(old_password, server.userdata[self.active_user]['password'])
def test_put_recoverpass_code_timeout(self):
"""
Test the put with the same valid "recoverpass" code at 2 different times (too late and in time).
"""
# First, test a PUT made too late, so the recoverpass code must be invalid;
# *then* (rewinding the clock to a time before the expiration time) repeat the PUT with the same
# recoverpass code, and this must succeed.
# NB: This is possible because (TODO?) expired tokens are currently kept.
recoverpass_creation_time = 100 # 1970, less than two minutes after the midnight of 31 dec 1969 :p
server.userdata[self.active_user]['recoverpass_data'] = {
'recoverpass_code': 'ok_code',
'timestamp': recoverpass_creation_time,
}
recoverpass_expiration_time = recoverpass_creation_time + server.USER_RECOVERPASS_TIMEOUT
just_in_time = recoverpass_expiration_time - 1
too_late = recoverpass_expiration_time + 1
test_responses = []
for now in (too_late, just_in_time): # backward
server.now_timestamp = lambda: now # Time machine Python powered :)
test_responses.append(self.app.put(SERVER_API + 'users/{}'.format(self.active_user),
data={'recoverpass_code': 'ok_code',
'password': '123.Abc'}))
# The first must be expired, the second must be valid.
self.assertEqual([test.status_code for test in test_responses], [HTTP_NOT_FOUND, HTTP_OK])
def test_password_recovery_email(self):
"""
Test recovery email recipient, subject and body.
"""
with server.mail.record_messages() as outbox:
self.app.post(urlparse.urljoin(SERVER_API, 'users/{}/reset'.format(self.active_user)))
# Retrieve the generated activation code
recoverpass_data = server.userdata[self.active_user]['recoverpass_data']
recoverpass_code = recoverpass_data['recoverpass_code']
self.assertEqual(len(outbox), 1)
body = outbox[0].body
recipients = outbox[0].recipients
subject = outbox[0].subject
self.assertEqual(recipients, [self.active_user])
# A line must be the recoverpass code
self.assertIn(recoverpass_code, body.splitlines())
# The email subject and body must contain some "keywords".
self.assertIn('password', subject.lower())
self.assertTrue('change' in body and 'password' in body)
def test_put_active_user_with_no_password(self):
"""
Test a PUT request made by an active user with no password (bad request).
"""
test = self.app.put(SERVER_API + 'users/{}'.format(self.active_user))
self.assertEqual(test.status_code, HTTP_BAD_REQUEST)
def test_put_active_user_weak_password(self):
"""
Test a PUT request with a weak password and assert that the user's password was not updated on disk.
"""
recoverpass_code = 'arbitrarycode'
server.userdata[self.active_user]['recoverpass_data'] = {'recoverpass_code': recoverpass_code,
'timestamp': server.now_timestamp(),
}
test = self.app.put(SERVER_API + 'users/{}'.format(self.active_user),
data={'recoverpass_code': recoverpass_code,
'password': 'weakpass'})
self.assertEqual(test.status_code, HTTP_FORBIDDEN)
self.assertNotEqual(server.userdata[self.active_user]['password'], 'weakpass')
def get_dic_dir_states():
"""
Return a tuple with dictionary state and directory state of all users.
NB: Passwords are removed from the dictionary states.
:return: tuple
"""
dic_state = {}
# Load the persisted userdata once (loading it inside the loop would discard the
# fields already popped for previously processed users).
dir_state = json.load(open('userdata.json', 'rb'))
for username in server.userdata:
single_user_data = server.userdata[username].copy()
single_user_data.pop('password') # not very beautiful
single_user_data.pop(server.USER_CREATION_TIME) # not very beautiful
dic_state[username] = single_user_data
dir_state[username].pop(server.PWD) # not very beautiful, cit. ibidem
dir_state[username].pop(server.USER_CREATION_TIME) # not very beautiful, cit. ibidem
return dic_state, dir_state
class TestUserdataConsistence(unittest.TestCase):
"""
Testing consistency between the userdata dictionary and the actual files.
"""
def setUp(self):
setup_test_dir()
self.app = server.app.test_client()
self.app.testing = True
def test_consistence_after_actions(self):
"""
Complex test that performs several actions and finally checks the consistency.
"""
# create user
user = 'pippo'
_manually_create_user(user, 'pass')
# we need to create userdata.json to check consistency
server.save_userdata()
# post
_create_file(user, 'new_file', 'ciao!!!')
url = SERVER_FILES_API + 'new_file'
self.app.post(url, headers=make_basicauth_headers(USR, PW))
# move
move_test_url = SERVER_ACTIONS_API + 'move'
src_move_test_file_path = 'test_move_src/testmovesrc.txt'
dst_move_test_file_path = 'test_move_dst/testmovedst.txt'
# create source file to be moved and its destination
_create_file(user, src_move_test_file_path, 'this is the file to be moved')
test = self.app.post(move_test_url,
headers=make_basicauth_headers(user, 'pass'),
data={'src': src_move_test_file_path, 'dst': dst_move_test_file_path},
follow_redirects=True)
# copy
copy_test_url = SERVER_FILES_API + 'copy'
test = self.app.post(copy_test_url,
headers=make_basicauth_headers(user, 'pass'),
data={'src': src_move_test_file_path, 'dst': dst_move_test_file_path},
follow_redirects=True)
# intermediate check
dic_state, dir_state = get_dic_dir_states()
self.assertEqual(dic_state[user]['files'], dir_state[user]['files'])
user, pw = 'pippo', 'pass'
# delete new_file
delete_test_url = SERVER_ACTIONS_API + 'delete'
self.app.post(delete_test_url,
headers=make_basicauth_headers(user, pw),
data={'filepath': "new_file"})
# check consistency
dic_state, dir_state = get_dic_dir_states()
self.assertEqual(dic_state[user]['files'], dir_state[user]['files'])
# WIP: Test not complete. TODO: Do more things! Put, ...?
# class TestLoggingConfiguration(unittest.TestCase):
# """
# Testing log directory creation if it doesn't exists
# """
#
# def setUp(self):
# if os.path.isdir('log'):
# shutil.rmtree('log')
#
# def test_create_log_directory(self):
# self.assertFalse(os.path.exists('log') and os.path.isdir('log'))
# reload(server)
# self.assertTrue(os.path.exists('log') and os.path.isdir('log'))
class TestShares(unittest.TestCase):
def setUp(self):
"""
Create users, folders and files to test the sharing feature.
"""
setup_test_dir()
self.app = server.app.test_client()
self.app.testing = True
_manually_remove_user(USR)
_manually_create_user(USR, PW)
_manually_create_user(SHAREUSR, SHAREUSRPW)
def tearDown(self):
_manually_remove_user(USR)
_manually_remove_user(SHAREUSR)
tear_down_test_dir()
def test_create_file_share(self):
sharedFile = 'test.txt'
_create_file(USR, sharedFile, 'test')
q = urlparse.urljoin(SERVER_SHARES_API, sharedFile + '/' + SHAREUSR)
test = self.app.post(q, headers=make_basicauth_headers(USR, PW))
sharedFileRealPath = userpath2serverpath(os.path.join(USR,sharedFile))
#check if the owner correctly shared access to the file with the sharing receiver
self.assertIn(SHAREUSR, server.userdata[USR]['shared_with_others'][sharedFile])
#check if the sharing receiver correctly received the shared file access from the owner
self.assertIn(sharedFile, server.userdata[SHAREUSR]['shared_with_me'][USR])
def test_create_folder_share(self):
sharedPath = 'Misc'
q = urlparse.urljoin(SERVER_SHARES_API, sharedPath + '/' + SHAREUSR)
test = self.app.post(q, headers=make_basicauth_headers(USR, PW))
sharedRealPath = userpath2serverpath(os.path.join(USR,sharedPath))
#check if the owner correctly shared access to the path with the sharing receiver
self.assertIn(SHAREUSR, server.userdata[USR]['shared_with_others'][sharedPath])
#check if the sharing receiver correctly received the shared path access from the owner
self.assertIn(sharedPath, server.userdata[SHAREUSR]['shared_with_me'][USR])
def test_create_illegal_share(self):
sharedFile = 'Misc/test.txt'
_create_file(USR, sharedFile, 'test')
q = urlparse.urljoin(SERVER_SHARES_API, sharedFile + '/' + SHAREUSR)
test = self.app.post(q, headers=make_basicauth_headers(USR, PW))
sharedFileRealPath = userpath2serverpath(os.path.join(USR,sharedFile))
# check that the owner has no shares with the receiver
self.assertNotIn(sharedFileRealPath, server.userdata[USR]['shared_with_others'].keys())
# check that the sharing receiver has no shares from the owner
self.assertNotIn(USR, server.userdata[SHAREUSR]['shared_with_me'].keys())
self.assertEqual(test.status_code, HTTP_FORBIDDEN)
def test_create_not_existing_share(self):
sharedFile = 'myfile.txt'
#_create_file(USR, sharedFile, 'test')
q = urlparse.urljoin(SERVER_SHARES_API, sharedFile + '/' + SHAREUSR)
test = self.app.post(q, headers=make_basicauth_headers(USR, PW))
sharedFileRealPath = userpath2serverpath(os.path.join(USR,sharedFile))
# check that the owner has no shares with the receiver
self.assertNotIn(sharedFileRealPath, server.userdata[USR]['shared_with_others'].keys())
# check that the sharing receiver has no shares from the owner
self.assertNotIn(USR, server.userdata[SHAREUSR]['shared_with_me'].keys())
self.assertEqual(test.status_code, HTTP_NOT_FOUND)
def test_share_already_shared_folder(self):
sharedPath = 'Misc'
q = urlparse.urljoin(SERVER_SHARES_API, sharedPath + '/' + SHAREUSR)
test = self.app.post(q, headers=make_basicauth_headers(USR, PW))
test_2 = self.app.post(q, headers=make_basicauth_headers(USR, PW))
self.assertEqual(test_2.status_code, HTTP_CONFLICT)
def test_get_shared_file(self):
"""
The server returns HTTP_OK if an authenticated user requests an existing shared file.
"""
#create file to share
sharedFile = 'test.txt'
_create_file(USR, sharedFile, 'test')
#share the file
q = urlparse.urljoin(SERVER_SHARES_API, sharedFile + '/' + SHAREUSR)
share = self.app.post(q, headers=make_basicauth_headers(USR, PW))
#create get:files request: API/V1/files/shared/<owner>/<resource path>
SHARED_DOWNLOAD_FILEPATH = 'shared/'+ USR + '/' + sharedFile
DOWNLOAD_SHARED_TEST_URL = SERVER_FILES_API + SHARED_DOWNLOAD_FILEPATH
test = self.app.get(DOWNLOAD_SHARED_TEST_URL,
headers=make_basicauth_headers(SHAREUSR, SHAREUSRPW))
self.assertEqual(test.status_code, server.HTTP_OK)
def test_get_file_in_shared_folder(self):
"""
The server returns HTTP_OK if an authenticated user requests an existing file from a shared folder.
"""
#share the folder
q = urlparse.urljoin(SERVER_SHARES_API, 'Music/' + SHAREUSR)
share = self.app.post(q, headers=make_basicauth_headers(USR, PW))
#create get:files request: API/V1/files/shared/<owner>/<resource path>
SHARED_DOWNLOAD_FILEPATH = 'shared/'+ USR + '/Music/Music.txt'
DOWNLOAD_SHARED_TEST_URL = SERVER_FILES_API + SHARED_DOWNLOAD_FILEPATH
test = self.app.get(DOWNLOAD_SHARED_TEST_URL,
headers=make_basicauth_headers(SHAREUSR, SHAREUSRPW))
self.assertEqual(test.status_code, server.HTTP_OK)
def test_get_file_in_not_shared_folder(self):
"""
The server returns HTTP_NOT_FOUND if an authenticated user requests an existing file from a folder that is legal but not shared.
"""
q = urlparse.urljoin(SERVER_SHARES_API, 'Music/' + SHAREUSR)
share = self.app.post(q, headers=make_basicauth_headers(USR, PW))
#create get:files request: API/V1/files/shared/<owner>/<resource path>
SHARED_DOWNLOAD_FILEPATH = 'shared/'+ USR + '/Work/Work.txt'
DOWNLOAD_SHARED_TEST_URL = SERVER_FILES_API + SHARED_DOWNLOAD_FILEPATH
test = self.app.get(DOWNLOAD_SHARED_TEST_URL,
headers=make_basicauth_headers(SHAREUSR, SHAREUSRPW))
self.assertEqual(test.status_code, server.HTTP_NOT_FOUND)
def test_remove_shared_file(self):
sharedFile = 'test.txt'
_create_file(USR, sharedFile, 'test')
q = urlparse.urljoin(SERVER_SHARES_API, sharedFile + '/' + SHAREUSR)
test = self.app.post(q, headers=make_basicauth_headers(USR, PW))
sharedFileRealPath = userpath2serverpath(os.path.join(USR,sharedFile))
q = urlparse.urljoin(SERVER_SHARES_API, sharedFile + '/' + SHAREUSR)
test = self.app.delete(q, headers=make_basicauth_headers(USR, PW))
self.assertNotIn(SHAREUSR, server.userdata[USR]['shared_with_others'][sharedFile])
self.assertNotIn(sharedFile, server.userdata[SHAREUSR]['shared_with_me'][USR])
def test_remove_shared_folder(self):
sharedFolder = 'Music'
q = urlparse.urljoin(SERVER_SHARES_API, sharedFolder + '/' + SHAREUSR)
test = self.app.post(q, headers=make_basicauth_headers(USR, PW))
sharedFileRealPath = userpath2serverpath(os.path.join(USR,sharedFolder))
q = urlparse.urljoin(SERVER_SHARES_API, sharedFolder + '/' + SHAREUSR)
test = self.app.delete(q, headers=make_basicauth_headers(USR, PW))
self.assertNotIn(SHAREUSR, server.userdata[USR]['shared_with_others'][sharedFolder])
self.assertNotIn(sharedFolder, server.userdata[SHAREUSR]['shared_with_me'][USR])
def test_remove_shared_file_with_no_user(self):
sharedFile = 'test.txt'
_create_file(USR, sharedFile, 'test')
q = urlparse.urljoin(SERVER_SHARES_API, sharedFile + '/' + SHAREUSR)
test = self.app.post(q, headers=make_basicauth_headers(USR, PW))
sharedFileRealPath = userpath2serverpath(os.path.join(USR,sharedFile))
q = urlparse.urljoin(SERVER_SHARES_API, sharedFile)
test = self.app.delete(q, headers=make_basicauth_headers(USR, PW))
self.assertNotIn(SHAREUSR, server.userdata[USR]['shared_with_others'][sharedFile])
self.assertNotIn(sharedFile, server.userdata[SHAREUSR]['shared_with_me'][USR])
def test_remove_shared_folder_with_no_user(self):
sharedFolder = 'Music'
q = urlparse.urljoin(SERVER_SHARES_API, sharedFolder + '/' + SHAREUSR)
test = self.app.post(q, headers=make_basicauth_headers(USR, PW))
sharedFileRealPath = userpath2serverpath(os.path.join(USR,sharedFolder))
q = urlparse.urljoin(SERVER_SHARES_API, sharedFolder)
test = self.app.delete(q, headers=make_basicauth_headers(USR, PW))
self.assertNotIn(SHAREUSR, server.userdata[USR]['shared_with_others'][sharedFolder])
self.assertNotIn(sharedFolder, server.userdata[SHAREUSR]['shared_with_me'][USR])
def test_remove_not_shared_folder(self):
sharedFolder = 'Music'
q = urlparse.urljoin(SERVER_SHARES_API, sharedFolder)
test = self.app.delete(q, headers=make_basicauth_headers(USR, PW))
self.assertEqual(test.status_code, server.HTTP_NOT_FOUND)
def test_remove_shared_folder_with_wrong_user(self):
sharedFolder = 'Music'
q = urlparse.urljoin(SERVER_SHARES_API, sharedFolder + '/' + SHAREUSR)
test = self.app.post(q, headers=make_basicauth_headers(USR, PW))
q = urlparse.urljoin(SERVER_SHARES_API, sharedFolder + '/' + 'Other_User')
test = self.app.delete(q, headers=make_basicauth_headers(USR, PW))
self.assertEqual(test.status_code, server.HTTP_NOT_FOUND)
q = urlparse.urljoin(SERVER_SHARES_API, sharedFolder + '/' + SHAREUSR)
test = self.app.delete(q, headers=make_basicauth_headers(USR, PW))
def test_delete_file_from_shared_folder(self):
"""
Test that a file deleted from a shared folder no longer appears among the receiver's shared files.
"""
delete_test_url = SERVER_ACTIONS_API + 'delete'
delete_test_file_path = 'Music/Music.txt'
to_delete_filepath = userpath2serverpath(USR, delete_test_file_path)
q = urlparse.urljoin(SERVER_SHARES_API, 'Music/' + SHAREUSR)
share = self.app.post(q, headers=make_basicauth_headers(USR, PW))
#self.assertIn('shared/[email protected]/Music/Music.txt', server.userdata[SHAREUSR]['shared_files'])
test = self.app.post(delete_test_url,
headers=make_basicauth_headers(USR, PW),
data={'filepath': delete_test_file_path}, follow_redirects=True)
# Check that neither the file nor the folder is still shared, since the shared folder is now empty.
#self.assertNotIn('Music', server.userdata[SHAREUSR]['shared_with_me'][USR])
self.assertNotIn('shared/[email protected]/Music/Music.txt', server.userdata[SHAREUSR]['shared_files'])
def test_copy_file_to_shared_folder(self):
"""
Test that a source file copied into a shared folder results in the new file being shared too.
"""
copy_test_url = SERVER_ACTIONS_API + 'copy'
src_copy_test_file_path = 'Misc/Misc.txt'
dst_copy_test_file_path = 'Work/MiscCopy.txt'
# Create source file to be copied and its destination.
q = urlparse.urljoin(SERVER_SHARES_API, 'Work/' + SHAREUSR)
share = self.app.post(q, headers=make_basicauth_headers(USR, PW))
test = self.app.post(copy_test_url,
headers=make_basicauth_headers(USR, PW),
data={'src': src_copy_test_file_path, 'dst': dst_copy_test_file_path},
follow_redirects=True)
self.assertIn('shared/[email protected]/Work/MiscCopy.txt', server.userdata[SHAREUSR]['shared_files'])
def test_move_file_to_shared_folder(self):
"""
Test that a source file moved into a shared folder results in the new file being shared too.
"""
copy_test_url = SERVER_ACTIONS_API + 'move'
src_copy_test_file_path = 'Misc/Misc.txt'
dst_copy_test_file_path = 'Work/MiscCopy.txt'
# Create source file to be moved and its destination.
q = urlparse.urljoin(SERVER_SHARES_API, 'Work/' + SHAREUSR)
share = self.app.post(q, headers=make_basicauth_headers(USR, PW))
test = self.app.post(copy_test_url,
headers=make_basicauth_headers(USR, PW),
data={'src': src_copy_test_file_path, 'dst': dst_copy_test_file_path},
follow_redirects=True)
self.assertIn('shared/[email protected]/Work/MiscCopy.txt', server.userdata[SHAREUSR]['shared_files'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
bobwalker99/Pydev | plugins/org.python.pydev.jython/Lib/email/base64MIME.py | 135 | 5794 | # Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield
# Contact: [email protected]
"""Base64 content transfer encoding per RFCs 2045-2047.
This module handles the content transfer encoding method defined in RFC 2045
to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit
characters encoding known as Base64.
It is used in the MIME standards for email to attach images, audio, and text
using some 8-bit character sets to messages.
This module provides an interface to encode and decode both headers and bodies
with Base64 encoding.
RFC 2045 defines a method for including character set information in an
`encoded-word' in a header. This method is commonly used for 8-bit real names
in To:, From:, Cc:, etc. fields, as well as Subject: lines.
This module does not do the line wrapping or end-of-line character conversion
necessary for proper internationalized headers; it only does dumb encoding and
decoding. To deal with the various line wrapping issues, use the email.header
module.
"""
__all__ = [
'base64_len',
'body_decode',
'body_encode',
'decode',
'decodestring',
'encode',
'encodestring',
'header_encode',
]
from binascii import b2a_base64, a2b_base64
from email.utils import fix_eols
CRLF = '\r\n'
NL = '\n'
EMPTYSTRING = ''
# See also Charset.py
MISC_LEN = 7
# Helpers
def base64_len(s):
"""Return the length of s when it is encoded with base64."""
groups_of_3, leftover = divmod(len(s), 3)
# 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
# Thanks, Tim!
n = groups_of_3 * 4
if leftover:
n += 4
return n
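# Illustrative sanity check (a sketch added for clarity, not part of the
# original module): base64 emits one 4-character group per 3 input bytes,
# rounding up, so for example:
#   base64_len('abc')  # -> 4
#   base64_len('abcd') # -> 8 (the leftover byte still costs a full group)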
def header_encode(header, charset='iso-8859-1', keep_eols=False,
maxlinelen=76, eol=NL):
"""Encode a single header line with Base64 encoding in a given charset.
Defined in RFC 2045, this Base64 encoding is identical to normal Base64
encoding, except that each line must be intelligently wrapped (respecting
the Base64 encoding), and subsequent lines must start with a space.
charset names the character set to use to encode the header. It defaults
to iso-8859-1.
End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
to the canonical email line separator \\r\\n unless the keep_eols
parameter is True (the default is False).
Each line of the header will be terminated in the value of eol, which
defaults to "\\n". Set this to "\\r\\n" if you are using the result of
this function directly in email.
The resulting string will be in the form:
"=?charset?b?WW/5ciBtYXp66XLrIHf8eiBhIGhhbXBzdGHuciBBIFlv+XIgbWF6euly?=\\n
=?charset?b?6yB3/HogYSBoYW1wc3Rh7nIgQkMgWW/5ciBtYXp66XLrIHf8eiBhIGhh?="
with each line wrapped at, at most, maxlinelen characters (defaults to 76
characters).
"""
# Return empty headers unchanged
if not header:
return header
if not keep_eols:
header = fix_eols(header)
# Base64 encode each line, in encoded chunks no greater than maxlinelen in
# length, after the RFC chrome is added in.
base64ed = []
max_encoded = maxlinelen - len(charset) - MISC_LEN
max_unencoded = max_encoded * 3 // 4
for i in range(0, len(header), max_unencoded):
base64ed.append(b2a_base64(header[i:i+max_unencoded]))
# Now add the RFC chrome to each encoded chunk
lines = []
for line in base64ed:
# Ignore the last character of each line if it is a newline
if line.endswith(NL):
line = line[:-1]
# Add the chrome
lines.append('=?%s?b?%s?=' % (charset, line))
# Glue the lines together and return it. BAW: should we be able to
# specify the leading whitespace in the joiner?
joiner = eol + ' '
return joiner.join(lines)
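# Illustrative example (a sketch, not part of the original module): a short
# ASCII header fits into a single encoded chunk, so with the default charset
#   header_encode('Hello')
# should produce '=?iso-8859-1?b?SGVsbG8=?='.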
def encode(s, binary=True, maxlinelen=76, eol=NL):
"""Encode a string with base64.
Each line will be wrapped at, at most, maxlinelen characters (defaults to
76 characters).
If binary is False, end-of-line characters will be converted to the
canonical email end-of-line sequence \\r\\n. Otherwise they will be left
verbatim (this is the default).
Each line of encoded text will end with eol, which defaults to "\\n". Set
this to "\\r\\n" if you will be using the result of this function directly
in an email.
"""
if not s:
return s
if not binary:
s = fix_eols(s)
encvec = []
max_unencoded = maxlinelen * 3 // 4
for i in range(0, len(s), max_unencoded):
# BAW: should encode() inherit b2a_base64()'s dubious behavior in
# adding a newline to the encoded string?
enc = b2a_base64(s[i:i + max_unencoded])
if enc.endswith(NL) and eol != NL:
enc = enc[:-1] + eol
encvec.append(enc)
return EMPTYSTRING.join(encvec)
# For convenience and backwards compatibility w/ standard base64 module
body_encode = encode
encodestring = encode
def decode(s, convert_eols=None):
"""Decode a raw base64 string.
If convert_eols is set to a string value, all canonical email linefeeds,
e.g. "\\r\\n", in the decoded text will be converted to the value of
convert_eols. os.linesep is a good choice for convert_eols if you are
decoding a text attachment.
This function does not parse a full MIME header value encoded with
base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high
level email.header class for that functionality.
"""
if not s:
return s
dec = a2b_base64(s)
if convert_eols:
return dec.replace(CRLF, convert_eols)
return dec
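# Round-trip sketch (illustrative, not part of the original module):
#   decode(encode('hello'))  # -> 'hello'
# and encode('hi\n', binary=False) first canonicalizes the bare '\n' to '\r\n'
# before encoding, as described in the encode() docstring above.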
# For convenience and backwards compatibility w/ standard base64 module
body_decode = decode
decodestring = decode
| epl-1.0 |
aurelijusb/arangodb | 3rdParty/V8-4.3.61/build/gyp/pylib/gyp/generator/ninja.py | 56 | 88430 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
class Target:
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here.
self.component_objs = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
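# Illustrative example (an assumed layout, not taken from this file): for a
# gyp file at foo/bar.gyp, a target named 'targ' and a build dir of out/Debug,
# GypPathToNinja('baz.cc') yields roughly '../../foo/baz.cc' (relative to the
# build dir), while GypPathToUniqueOutput('baz.o') yields 'obj/foo/targ.baz.o'.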
class NinjaWriter:
def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.qualified_target = qualified_target
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
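# For instance (a sketch, assuming a configuration named 'Debug'):
#   ExpandSpecial('$!PRODUCT_DIR/gen/foo')      # -> 'gen/foo' when product_dir is None
#   ExpandSpecial('lib$|CONFIGURATION_NAME.a')  # -> 'libDebug.a'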
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
return None
if len(targets) > 1:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets)
self.ninja.newline()
return targets[0]
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
self.target.actions_stamp or actions_depends)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRules(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
self.WriteMacInfoPlist(mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv(
'$!PRODUCT_DIR', config=self.config_name)
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'],
hashlib.md5(self.qualified_target).hexdigest())
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env=env)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetSortedXcodeEnv()
all_outputs = []
for rule in rules:
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'],
hashlib.md5(self.qualified_target).hexdigest())
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env=env)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if ('${%s}' % var) in argument:
needed_variables.add(var)
def cygwin_munge(path):
if is_cygwin:
return path.replace('\\', '/')
return path
# For each source file, write an edge that generates all the outputs.
for source in rule.get('rule_sources', []):
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
inputs = [self.ExpandRuleVariables(i, root, dirname,
source, ext, basename)
for i in rule.get('inputs', [])]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var == None, repr(var)
inputs = [self.GypPathToNinja(i, env) for i in inputs]
outputs = [self.GypPathToNinja(o, env) for o in outputs]
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetSortedXcodeEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource')])
bundle_depends.append(output)
def WriteMacInfoPlist(self, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
# See comment at cc_command for why there are two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=config_name)
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
self.msvs_settings.GetArch(config_name) == 'x86' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
# Asm files only get auto assembled for x86 (not x64).
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
self.ninja.build(output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_name,
is_executable, self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append('-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
gyp.common.uniquer(map(self.ExpandSpecial, ldflags)))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor == 'win':
extra_bindings.append(('dll', output))
if '/NOENTRY' not in ldflags:
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
output = [output, self.target.import_lib]
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
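"""Determine this target's binary output: 'none' targets reuse the compile stamp, static libraries get an 'alink' (archive) step, and everything else is linked via WriteLink. Returns self.target.binary."""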
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be non-null if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, restat=True,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.exe'
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.lib'
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.dll'
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to ninja easier, ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
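"""Prefix |prog| with the wrapper configured for |cmd|, if any (e.g. a 'CC' wrapper of 'ccache' turns 'gcc' into 'ccache gcc')."""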
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
mem_limit = max(1, stat.ullTotalPhys / (4 * (2 ** 30))) # total / 4GB
hard_cap = max(1, int(os.getenv('GYP_LINK_CONCURRENCY_MAX', 2**32)))
return min(mem_limit, hard_cap)
elif sys.platform.startswith('linux'):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
# Allow 8 GB per link on Linux because Gold is quite memory hungry
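# Illustrative arithmetic: MemTotal of 16777216 kB (a 16 GB machine) gives
# max(1, 16777216 / (8 * 2 ** 20)) = 2 concurrent links.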
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
dlldesc = 'LINK%s(DLL) $dll' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch '
'$ld /nologo $implibflag /DLL /OUT:$dll '
'/PDB:$dll.pdb @$dll.rsp' % sys.executable)
dllcmd = FullLinkCommand(dllcmd, '$dll', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$dll.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$dll.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch '
'$ld /nologo /OUT:$out /PDB:$out.pdb @$out.rsp' %
sys.executable)
exe_cmd = FullLinkCommand(exe_cmd, '$out', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $out' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$out.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
# The rules are
# - The priority from low to high is gcc/g++, the 'make_global_settings' in
# gyp, the environment variable.
# - If there is no 'make_global_settings' for CC.host/CXX.host or
# 'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be set
# to cc/cxx.
if flavor == 'win':
# Overridden by local arch choice in the use_deps case.
# Chromium's ffmpeg c99conv.py currently looks for a 'cc =' line in
# build.ninja so needs something valid here. http://crbug.com/233985
cc = 'cl.exe'
cxx = 'cl.exe'
ld = 'link.exe'
ld_host = '$ld'
else:
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'CC':
cc = os.path.join(build_to_root, value)
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, OpenOutput)
for arch, path in cl_paths.iteritems():
master_ninja.variable(
'cl_' + arch, CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, flavor)))
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', 'lib.exe')
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('asm', 'ml.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], 'ar'))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], 'ar'))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but rarely errors out with
# c1xx : fatal error C1033: cannot open program database
# By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $in',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %(suffix)s placeholder which
# is filled in by the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ]; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then mv ${lib}.tmp ${lib}.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ readelf -d ${lib} | grep SONAME ; '
'nm -gD -f p ${lib} | cut -f1-2 -d\' \'; }')})
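# Illustrative expansion: the 'solink' rule below substitutes %(suffix)s with
# '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs', so the embedded
# link command becomes
# '$ld -shared $ldflags -o $lib -Wl,-soname=$soname -Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs'.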
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--start-group $in $solibs -Wl,--end-group '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in $solibs -Wl,--end-group $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; '
'else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then '
'mv ${lib}.tmp ${lib}.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '$in $solibs $libs$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
pool='link_pool')
solink_module_suffix = '$in $solibs $libs$postbuilds'
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_module_suffix,
'type': '-bundle'},
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_module_suffix, 'type': '-bundle'},
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $keys')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='ln -f $in $out 2>/dev/null || (rm -rf $out && cp -af $in $out)')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(qualified_target, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only create files for ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| apache-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django-1.2/tests/regressiontests/admin_ordering/tests.py | 42 | 1308 | from django.test import TestCase
from django.contrib.admin.options import ModelAdmin
from models import Band
class TestAdminOrdering(TestCase):
"""
Let's make sure that ModelAdmin.queryset uses the ordering we define in
ModelAdmin rather that ordering defined in the model's inner Meta
class.
"""
def setUp(self):
b1 = Band(name='Aerosmith', bio='', rank=3)
b1.save()
b2 = Band(name='Radiohead', bio='', rank=1)
b2.save()
b3 = Band(name='Van Halen', bio='', rank=2)
b3.save()
def test_default_ordering(self):
"""
The default ordering should be by name, as specified in the inner Meta
class.
"""
ma = ModelAdmin(Band, None)
names = [b.name for b in ma.queryset(None)]
self.assertEqual([u'Aerosmith', u'Radiohead', u'Van Halen'], names)
def test_specified_ordering(self):
"""
Let's use a custom ModelAdmin that changes the ordering, and make sure
it actually changes.
"""
class BandAdmin(ModelAdmin):
ordering = ('rank',) # default ordering is ('name',)
ma = BandAdmin(Band, None)
names = [b.name for b in ma.queryset(None)]
self.assertEqual([u'Radiohead', u'Van Halen', u'Aerosmith'], names)
| bsd-3-clause |
ivandevp/django | django/conf/locale/mk/formats.py | 504 | 1742 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
MoKee/android_kernel_bn_omap | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/numpy/lib/tests/test_utils.py | 149 | 1466 | from __future__ import division, absolute_import, print_function
import sys
from numpy.core import arange
from numpy.testing import (
run_module_suite, assert_, assert_equal, dec
)
from numpy.lib import deprecate
import numpy.lib.utils as utils
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
@dec.skipif(sys.flags.optimize == 2)
def test_lookfor():
out = StringIO()
utils.lookfor('eigenvalue', module='numpy', output=out,
import_modules=False)
out = out.getvalue()
assert_('numpy.linalg.eig' in out)
@deprecate
def old_func(self, x):
return x
@deprecate(message="Rather use new_func2")
def old_func2(self, x):
return x
def old_func3(self, x):
return x
new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3")
def test_deprecate_decorator():
assert_('deprecated' in old_func.__doc__)
def test_deprecate_decorator_message():
assert_('Rather use new_func2' in old_func2.__doc__)
def test_deprecate_fn():
assert_('old_func3' in new_func3.__doc__)
assert_('new_func3' in new_func3.__doc__)
def test_safe_eval_nameconstant():
# Test if safe_eval supports Python 3.4 _ast.NameConstant
utils.safe_eval('None')
def test_byte_bounds():
a = arange(12).reshape(3, 4)
low, high = utils.byte_bounds(a)
assert_equal(high - low, a.size * a.itemsize)
if __name__ == "__main__":
run_module_suite()
| mit |
mewtaylor/django | django/contrib/messages/storage/cookie.py | 471 | 6545 | import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import SimpleCookie
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.safestring import SafeData, mark_safe
class MessageEncoder(json.JSONEncoder):
"""
Compactly serializes instances of the ``Message`` class as JSON.
"""
message_key = '__json_message'
def default(self, obj):
if isinstance(obj, Message):
# Using 0/1 here instead of False/True to produce more compact json
is_safedata = 1 if isinstance(obj.message, SafeData) else 0
message = [self.message_key, is_safedata, obj.level, obj.message]
if obj.extra_tags:
message.append(obj.extra_tags)
return message
return super(MessageEncoder, self).default(obj)
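# Example of the compact form (illustrative): a message with level 20, text 'Hello'
# and extra_tags 'tag' serializes to ["__json_message", 0, 20, "Hello", "tag"]
# (the 0 means the text was not SafeData).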
class MessageDecoder(json.JSONDecoder):
"""
Decodes JSON that includes serialized ``Message`` instances.
"""
def process_messages(self, obj):
if isinstance(obj, list) and obj:
if obj[0] == MessageEncoder.message_key:
if len(obj) == 3:
# Compatibility with previously-encoded messages
return Message(*obj[1:])
if obj[1]:
obj[3] = mark_safe(obj[3])
return Message(*obj[2:])
return [self.process_messages(item) for item in obj]
if isinstance(obj, dict):
return {key: self.process_messages(value)
for key, value in six.iteritems(obj)}
return obj
def decode(self, s, **kwargs):
decoded = super(MessageDecoder, self).decode(s, **kwargs)
return self.process_messages(decoded)
class CookieStorage(BaseStorage):
"""
Stores messages in a cookie.
"""
cookie_name = 'messages'
# uwsgi's default configuration enforces a maximum size of 4kb for all the
# HTTP headers. In order to leave some room for other cookies and headers,
# restrict the session cookie to 1/2 of 4kb. See #18781.
max_cookie_size = 2048
not_finished = '__messagesnotfinished__'
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
"""
data = self.request.COOKIES.get(self.cookie_name)
messages = self._decode(data)
all_retrieved = not (messages and messages[-1] == self.not_finished)
if messages and not all_retrieved:
# remove the sentinel value
messages.pop()
return messages, all_retrieved
def _update_cookie(self, encoded_data, response):
"""
Either sets the cookie with the encoded data if there is any data to
store, or deletes the cookie.
"""
if encoded_data:
response.set_cookie(self.cookie_name, encoded_data,
domain=settings.SESSION_COOKIE_DOMAIN,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None)
else:
response.delete_cookie(self.cookie_name,
domain=settings.SESSION_COOKIE_DOMAIN)
def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
"""
Stores the messages to a cookie, returning a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, removes
messages until the data fits (these are the messages which are
returned), and add the not_finished sentinel value to indicate as much.
"""
unstored_messages = []
encoded_data = self._encode(messages)
if self.max_cookie_size:
# data is going to be stored eventually by SimpleCookie, which
# adds its own overhead, which we must account for.
cookie = SimpleCookie() # create outside the loop
def stored_length(val):
return len(cookie.value_encode(val)[1])
while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
if remove_oldest:
unstored_messages.append(messages.pop(0))
else:
unstored_messages.insert(0, messages.pop())
encoded_data = self._encode(messages + [self.not_finished],
encode_empty=unstored_messages)
self._update_cookie(encoded_data, response)
return unstored_messages
def _hash(self, value):
"""
Creates an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
"""
key_salt = 'django.contrib.messages'
return salted_hmac(key_salt, value).hexdigest()
def _encode(self, messages, encode_empty=False):
"""
Returns an encoded version of the messages list which can be stored as
plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
"""
if messages or encode_empty:
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
return '%s$%s' % (self._hash(value), value)
def _decode(self, data):
"""
Safely decodes an encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, ``None`` is returned.
"""
if not data:
return None
bits = data.split('$', 1)
if len(bits) == 2:
hash, value = bits
if constant_time_compare(hash, self._hash(value)):
try:
# If we get here (and the JSON decode works), everything is
# good. In any other case, drop back and return None.
return json.loads(value, cls=MessageDecoder)
except ValueError:
pass
# Mark the data as used (so it gets removed) since something was wrong
# with the data.
self.used = True
return None
| bsd-3-clause |
40223139/39g7test | static/Brython3.1.3-20150514-095342/Lib/select.py | 730 | 9440 | """
borrowed from jython
https://bitbucket.org/jython/jython/raw/28a66ba038620292520470a0bb4dc9bb8ac2e403/Lib/select.py
"""
#import java.nio.channels.SelectableChannel
#import java.nio.channels.SelectionKey
#import java.nio.channels.Selector
#from java.nio.channels.SelectionKey import OP_ACCEPT, OP_CONNECT, OP_WRITE, OP_READ
import errno
import os
import queue
import socket
class error(Exception): pass
ALL = None
_exception_map = {
# (<javaexception>, <circumstance>) : lambda: <code that raises the python equivalent>
#(java.nio.channels.ClosedChannelException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
#(java.nio.channels.CancelledKeyException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
#(java.nio.channels.IllegalBlockingModeException, ALL) : error(errno.ESOCKISBLOCKING, 'socket must be in non-blocking mode'),
}
def _map_exception(exc, circumstance=ALL):
try:
mapped_exception = _exception_map[(exc.__class__, circumstance)]
mapped_exception.java_exception = exc
return mapped_exception
except KeyError:
return error(-1, 'Unmapped java exception: <%s:%s>' % (exc.toString(), circumstance))
POLLIN = 1
POLLOUT = 2
# The following event types are completely ignored on jython
# Java does not support them, AFAICT
# They are declared only to support code compatibility with cpython
POLLPRI = 4
POLLERR = 8
POLLHUP = 16
POLLNVAL = 32
def _getselectable(selectable_object):
try:
channel = selectable_object.getchannel()
except:
try:
channel = selectable_object.fileno().getChannel()
except:
raise TypeError("Object '%s' is not watchable" % selectable_object,
errno.ENOTSOCK)
if channel and not isinstance(channel, java.nio.channels.SelectableChannel):
raise TypeError("Object '%s' is not watchable" % selectable_object,
errno.ENOTSOCK)
return channel
class poll:
def __init__(self):
self.selector = java.nio.channels.Selector.open()
self.chanmap = {}
self.unconnected_sockets = []
def _register_channel(self, socket_object, channel, mask):
jmask = 0
if mask & POLLIN:
# Note that OP_READ is NOT a valid event on server socket channels.
if channel.validOps() & OP_ACCEPT:
jmask = OP_ACCEPT
else:
jmask = OP_READ
if mask & POLLOUT:
if channel.validOps() & OP_WRITE:
jmask |= OP_WRITE
if channel.validOps() & OP_CONNECT:
jmask |= OP_CONNECT
selectionkey = channel.register(self.selector, jmask)
self.chanmap[channel] = (socket_object, selectionkey)
def _check_unconnected_sockets(self):
temp_list = []
for socket_object, mask in self.unconnected_sockets:
channel = _getselectable(socket_object)
if channel is not None:
self._register_channel(socket_object, channel, mask)
else:
temp_list.append( (socket_object, mask) )
self.unconnected_sockets = temp_list
def register(self, socket_object, mask = POLLIN|POLLOUT|POLLPRI):
try:
channel = _getselectable(socket_object)
if channel is None:
# The socket is not yet connected, and thus has no channel
# Add it to a pending list, and return
self.unconnected_sockets.append( (socket_object, mask) )
return
self._register_channel(socket_object, channel, mask)
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def unregister(self, socket_object):
try:
channel = _getselectable(socket_object)
self.chanmap[channel][1].cancel()
del self.chanmap[channel]
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _dopoll(self, timeout):
if timeout is None or timeout < 0:
self.selector.select()
else:
try:
timeout = int(timeout)
if not timeout:
self.selector.selectNow()
else:
# No multiplication required: both cpython and java use millisecond timeouts
self.selector.select(timeout)
except ValueError as vx:
raise error("poll timeout must be a number of milliseconds or None", errno.EINVAL)
# The returned selectedKeys cannot be used from multiple threads!
return self.selector.selectedKeys()
def poll(self, timeout=None):
try:
self._check_unconnected_sockets()
selectedkeys = self._dopoll(timeout)
results = []
for k in selectedkeys.iterator():
jmask = k.readyOps()
pymask = 0
if jmask & OP_READ: pymask |= POLLIN
if jmask & OP_WRITE: pymask |= POLLOUT
if jmask & OP_ACCEPT: pymask |= POLLIN
if jmask & OP_CONNECT: pymask |= POLLOUT
# Now return the original userobject, and the return event mask
results.append( (self.chanmap[k.channel()][0], pymask) )
return results
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _deregister_all(self):
try:
for k in self.selector.keys():
k.cancel()
# Keys are not actually removed from the selector until the next select operation.
self.selector.selectNow()
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def close(self):
try:
self._deregister_all()
self.selector.close()
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _calcselecttimeoutvalue(value):
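"""Convert a select()-style timeout in seconds into the millisecond value poll() expects: None means block, anything under one microsecond becomes 0 (poll immediately), e.g. 2.5 -> 2500."""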
if value is None:
return None
try:
floatvalue = float(value)
except Exception as x:
raise TypeError("Select timeout value must be a number or None")
if value < 0:
raise error("Select timeout value cannot be negative", errno.EINVAL)
if floatvalue < 0.000001:
return 0
return int(floatvalue * 1000) # Convert to milliseconds
# This cache for poll objects is required because of a bug in java on MS Windows
# http://bugs.jython.org/issue1291
class poll_object_cache:
def __init__(self):
self.is_windows = os.name == 'nt'
if self.is_windows:
self.poll_object_queue = queue.Queue()
import atexit
atexit.register(self.finalize)
def get_poll_object(self):
if not self.is_windows:
return poll()
try:
return self.poll_object_queue.get(False)
except Queue.Empty:
return poll()
def release_poll_object(self, pobj):
if self.is_windows:
pobj._deregister_all()
self.poll_object_queue.put(pobj)
else:
pobj.close()
def finalize(self):
if self.is_windows:
while True:
try:
p = self.poll_object_queue.get(False)
p.close()
except Queue.Empty:
return
_poll_object_cache = poll_object_cache()
def native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
timeout = _calcselecttimeoutvalue(timeout)
# First create a poll object to do the actual watching.
pobj = _poll_object_cache.get_poll_object()
try:
registered_for_read = {}
# Check the read list
for fd in read_fd_list:
pobj.register(fd, POLLIN)
registered_for_read[fd] = 1
# And now the write list
for fd in write_fd_list:
if fd in registered_for_read:
# registering a second time overwrites the first
pobj.register(fd, POLLIN|POLLOUT)
else:
pobj.register(fd, POLLOUT)
results = pobj.poll(timeout)
# Now start preparing the results
read_ready_list, write_ready_list, oob_ready_list = [], [], []
for fd, mask in results:
if mask & POLLIN:
read_ready_list.append(fd)
if mask & POLLOUT:
write_ready_list.append(fd)
return read_ready_list, write_ready_list, oob_ready_list
finally:
_poll_object_cache.release_poll_object(pobj)
select = native_select
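# Illustrative sketch (not part of the original module) of the select()-style
# wrapper above.  ``sock_a`` and ``sock_b`` are hypothetical connected
# sockets and the 5.0 second timeout is an arbitrary example value.
def _example_select_usage(sock_a, sock_b):
    readable, writable, _ = select([sock_a, sock_b], [sock_b], [], 5.0)
    for sock in readable:
        sock.recv(4096)
    for sock in writable:
        sock.send("ping")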
def cpython_compatible_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
# First turn all sockets to non-blocking
# keeping track of which ones have changed
modified_channels = []
try:
for socket_list in [read_fd_list, write_fd_list, outofband_fd_list]:
for s in socket_list:
channel = _getselectable(s)
if channel.isBlocking():
modified_channels.append(channel)
channel.configureBlocking(0)
return native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout)
finally:
for channel in modified_channels:
channel.configureBlocking(1)
| gpl-3.0 |
BrotherPhil/django | tests/template_tests/syntax_tests/test_now.py | 443 | 2037 | from datetime import datetime
from django.test import SimpleTestCase
from django.utils.formats import date_format
from ..utils import setup
class NowTagTests(SimpleTestCase):
@setup({'now01': '{% now "j n Y" %}'})
def test_now01(self):
"""
Simple case
"""
output = self.engine.render_to_string('now01')
self.assertEqual(output, "%d %d %d" % (
datetime.now().day, datetime.now().month, datetime.now().year,
))
# Check parsing of locale strings
@setup({'now02': '{% now "DATE_FORMAT" %}'})
def test_now02(self):
output = self.engine.render_to_string('now02')
self.assertEqual(output, date_format(datetime.now()))
@setup({'now03': '{% now \'j n Y\' %}'})
def test_now03(self):
"""
#15092 - Also accept simple quotes
"""
output = self.engine.render_to_string('now03')
self.assertEqual(output, "%d %d %d" % (
datetime.now().day, datetime.now().month, datetime.now().year,
))
@setup({'now04': '{% now \'DATE_FORMAT\' %}'})
def test_now04(self):
output = self.engine.render_to_string('now04')
self.assertEqual(output, date_format(datetime.now()))
@setup({'now05': '{% now \'j "n" Y\'%}'})
def test_now05(self):
output = self.engine.render_to_string('now05')
self.assertEqual(output, '%d "%d" %d' % (
datetime.now().day, datetime.now().month, datetime.now().year,
))
@setup({'now06': '{% now "j \'n\' Y"%}'})
def test_now06(self):
output = self.engine.render_to_string('now06')
self.assertEqual(output, "%d '%d' %d" % (
datetime.now().day, datetime.now().month, datetime.now().year,
))
@setup({'now07': '{% now "j n Y" as N %}-{{N}}-'})
def test_now07(self):
output = self.engine.render_to_string('now07')
self.assertEqual(output, '-%d %d %d-' % (
datetime.now().day, datetime.now().month, datetime.now().year,
))
| bsd-3-clause |
SmartArduino/Arduino-1 | arduino-core/src/processing/app/i18n/python/requests/packages/charade/sbcsgroupprober.py | 2936 | 3291 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel,
False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True,
hebrewProber)
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber,
visualHebrewProber])
self.reset()
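# Illustrative sketch (not part of the original module): how the grouped
# single-byte prober is typically fed, mirroring charade's own detection
# flow.  The byte string argument is a hypothetical example value.
def _example_sbcs_probe(byte_string):
    prober = SBCSGroupProber()
    prober.feed(byte_string)
    return prober.get_charset_name(), prober.get_confidence()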
| lgpl-2.1 |
ajenhl/eats | server/eats/tests/views/test_entity_merge.py | 1 | 6154 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import reverse
from eats.models import Entity
from eats.tests.views.view_test_case import ViewTestCase
class EntityMergeViewTestCase (ViewTestCase):
def setUp (self):
super(EntityMergeViewTestCase, self).setUp()
self.authority_id = self.authority.get_id()
user = self.create_django_user('user', '[email protected]', 'password')
self.editor = self.create_user(user)
self.editor.editable_authorities = [self.authority]
self.editor.set_current_authority(self.authority)
def test_authentication (self):
entity = self.tm.create_entity(self.authority)
url = reverse('entity-merge', kwargs={'entity_id': entity.get_id()})
login_url = settings.LOGIN_URL + '?next=' + url
response = self.app.get(url)
self.assertRedirects(response, login_url)
user = self.create_django_user('user2', '[email protected]', 'password')
response = self.app.get(url, user='user2')
self.assertRedirects(response, login_url)
self.create_user(user)
response = self.app.get(url, user='user2')
self.assertRedirects(response, login_url)
response = self.app.get(url, user='user')
self.assertEqual(response.status_code, 200)
def test_non_existent_entity (self):
url = reverse('entity-merge', kwargs={'entity_id': 0})
self.app.get(url, status=404, user='user')
def test_missing_merge_entity (self):
entity = self.tm.create_entity(self.authority)
url = reverse('entity-merge', kwargs={'entity_id': entity.get_id()})
response = self.app.get(url, user='user')
form = response.forms['entity-merge-form']
form['merge_entity_1'] = None
response = form.submit()
# No redirect, due to invalid form.
self.assertEqual(response.status_code, 200)
def test_unauthorised (self):
authority2 = self.create_authority('new authority')
entity_type1 = self.create_entity_type('Person')
entity_type2 = self.create_entity_type('Being')
self.authority.set_entity_types([entity_type1, entity_type2])
authority2.set_entity_types([entity_type1])
entity1 = self.tm.create_entity(self.authority)
entity1_type1 = entity1.create_entity_type_property_assertion(
self.authority, entity_type1)
entity2 = self.tm.create_entity(self.authority)
entity2_type1 = entity2.create_entity_type_property_assertion(
self.authority, entity_type2)
entity2_type2 = entity2.create_entity_type_property_assertion(
authority2, entity_type1)
self.assertEqual(Entity.objects.count(), 2)
url = reverse('entity-merge', kwargs={'entity_id': entity1.get_id()})
response = self.app.get(url, user='user')
form = response.forms['entity-merge-form']
form['merge_entity_1'] = entity2.get_id()
response = form.submit()
# No redirect, due to the merge entity having property
# assertions that the user is not an editor for.
self.assertEqual(response.status_code, 200)
self.assertEqual(Entity.objects.count(), 2)
self.assertEqual(set(entity1.get_entity_types()), set([entity1_type1]))
self.assertEqual(set(entity2.get_entity_types()),
set([entity2_type1, entity2_type2]))
def test_successful_merge (self):
entity_type1 = self.create_entity_type('Person')
entity_type2 = self.create_entity_type('Being')
self.authority.set_entity_types([entity_type1, entity_type2])
entity1 = self.tm.create_entity(self.authority)
type1 = entity1.create_entity_type_property_assertion(self.authority,
entity_type1)
entity2 = self.tm.create_entity(self.authority)
type2 = entity2.create_entity_type_property_assertion(self.authority,
entity_type2)
type3 = entity2.create_entity_type_property_assertion(self.authority,
entity_type1)
self.assertEqual(Entity.objects.count(), 2)
url = reverse('entity-merge', kwargs={'entity_id': entity1.get_id()})
response = self.app.get(url, user='user')
form = response.forms['entity-merge-form']
form['merge_entity_1'] = entity2.get_id()
response = form.submit().follow()
self.assertEqual(response.request.path_qs,
reverse('entity-change',
kwargs={'entity_id': entity1.get_id()}))
self.assertEqual(Entity.objects.count(), 1)
self.assertEqual(set(entity1.get_entity_types()),
set([type1, type2]))
def test_merge_redirect (self):
# A merged entity should have URLs based on its identifier
# redirect to the appropriate page for the entity it was
# merged into.
entity1 = self.tm.create_entity(self.authority)
entity2 = self.tm.create_entity(self.authority)
views = ('entity-view', 'entity-eatsml-view', 'entity-change',
'entity-delete', 'entity-merge')
for view in views:
url = reverse(view, kwargs={'entity_id': entity2.get_id()})
response = self.app.get(url, user='user')
self.assertEqual(response.status_code, 200,
'Got an incorrect response for the "%s" view'
% view)
entity1.merge_in(entity2)
for view in views:
url = reverse(view, kwargs={'entity_id': entity2.get_id()})
response = self.app.get(url, user='user')
self.assertEqual(response.status_code, 301)
redirect_url = reverse(view, kwargs={'entity_id': entity1.get_id()})
self.assertRedirects(response, redirect_url, status_code=301,
msg_prefix='With the "%s" view'
% view)
| gpl-3.0 |
ryanjmccall/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt.py | 69 | 16846 | from __future__ import division
import math
import os
import sys
import matplotlib
from matplotlib import verbose
from matplotlib.cbook import is_string_like, onetrue
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.widgets import SubplotTool
try:
import qt
except ImportError:
raise ImportError("Qt backend requires pyqt to be installed.")
backend_version = "0.9.1"
def fn_name(): return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE : qt.Qt.PointingHandCursor,
cursors.HAND : qt.Qt.WaitCursor,
cursors.POINTER : qt.Qt.ArrowCursor,
cursors.SELECT_REGION : qt.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one
"""
if qt.QApplication.startingUp():
if DEBUG: print "Starting up QApplication"
global qApp
qApp = qt.QApplication( [" "] )
qt.QObject.connect( qApp, qt.SIGNAL( "lastWindowClosed()" ),
qApp, qt.SLOT( "quit()" ) )
#remember that matplotlib created the qApp - will be used by show()
_create_qApp.qAppCreatedHere = True
_create_qApp.qAppCreatedHere = False
def show():
"""
Show all the figures and enter the qt main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if DEBUG: print 'Inside show'
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
if _create_qApp.qAppCreatedHere:
qt.qApp.exec_loop()
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQT( thisFig )
manager = FigureManagerQT( canvas, num )
return manager
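# Illustrative sketch (added for clarity; not part of the original backend):
# how pylab-style code typically drives this backend.  The figure number,
# figure size and plotted data are arbitrary example values.
def _example_backend_usage():
    manager = new_figure_manager(1, figsize=(4, 3))
    axes = manager.canvas.figure.gca()
    axes.plot([0, 1, 2], [0, 1, 4])
    manager.window.show()
    qt.qApp.exec_loop()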
class FigureCanvasQT( qt.QWidget, FigureCanvasBase ):
keyvald = { qt.Qt.Key_Control : 'control',
qt.Qt.Key_Shift : 'shift',
qt.Qt.Key_Alt : 'alt',
}
# left 1, middle 2, right 3
buttond = {1:1, 2:3, 4:2}
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQt: ', figure
_create_qApp()
qt.QWidget.__init__( self, None, "QWidget figure" )
FigureCanvasBase.__init__( self, figure )
self.figure = figure
self.setMouseTracking( True )
w,h = self.get_width_height()
self.resize( w, h )
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent( self, event ):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond[event.button()]
FigureCanvasBase.button_press_event( self, x, y, button )
if DEBUG: print 'button pressed:', event.button()
def mouseMoveEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event( self, x, y )
if DEBUG: print 'mouse move'
def mouseReleaseEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond[event.button()]
FigureCanvasBase.button_release_event( self, x, y, button )
if DEBUG: print 'button released'
def keyPressEvent( self, event ):
key = self._get_key( event )
FigureCanvasBase.key_press_event( self, key )
if DEBUG: print 'key press', key
def keyReleaseEvent( self, event ):
key = self._get_key(event)
FigureCanvasBase.key_release_event( self, key )
if DEBUG: print 'key release', key
def resizeEvent( self, event ):
if DEBUG: print 'resize (%d x %d)' % (event.size().width(), event.size().height())
qt.QWidget.resizeEvent( self, event )
w = event.size().width()
h = event.size().height()
if DEBUG: print "FigureCanvasQt.resizeEvent(", w, ",", h, ")"
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches( winch, hinch )
self.draw()
def resize( self, w, h ):
# Pass through to Qt to resize the widget.
qt.QWidget.resize( self, w, h )
# Resize the figure by converting pixels to inches.
pixelPerInch = self.figure.dpi
wInch = w / pixelPerInch
hInch = h / pixelPerInch
self.figure.set_size_inches( wInch, hInch )
# Redraw everything.
self.draw()
def sizeHint( self ):
w, h = self.get_width_height()
return qt.QSize( w, h )
    def minimumSizeHint( self ):
return qt.QSize( 10, 10 )
def _get_key( self, event ):
if event.key() < 256:
key = event.text().latin1()
        elif event.key() in self.keyvald:
key = self.keyvald[ event.key() ]
else:
key = None
return key
def flush_events(self):
qt.qApp.processEvents()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerQT( FigureManagerBase ):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__( self, canvas, num ):
if DEBUG: print 'FigureManagerQT.%s' % fn_name()
FigureManagerBase.__init__( self, canvas, num )
self.canvas = canvas
self.window = qt.QMainWindow( None, None, qt.Qt.WDestructiveClose )
self.window.closeEvent = self._widgetCloseEvent
centralWidget = qt.QWidget( self.window )
self.canvas.reparent( centralWidget, qt.QPoint( 0, 0 ) )
# Give the keyboard focus to the figure instead of the manager
self.canvas.setFocusPolicy( qt.QWidget.ClickFocus )
self.canvas.setFocus()
self.window.setCaption( "Figure %d" % num )
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, centralWidget)
# Use a vertical layout for the plot and the toolbar. Set the
# stretch to all be in the plot so the toolbar doesn't resize.
self.layout = qt.QVBoxLayout( centralWidget )
self.layout.addWidget( self.canvas, 1 )
if self.toolbar:
self.layout.addWidget( self.toolbar, 0 )
self.window.setCentralWidget( centralWidget )
# Reset the window height so the canvas will be the right
# size. This ALMOST works right. The first issue is that the
# height w/ a toolbar seems to be off by just a little bit (so
# we add 4 pixels). The second is that the total width/height
        # is slightly smaller than we actually want. It seems like
# the border of the window is being included in the size but
# AFAIK there is no way to get that size.
w = self.canvas.width()
h = self.canvas.height()
if self.toolbar:
h += self.toolbar.height() + 4
self.window.resize( w, h )
if matplotlib.is_interactive():
self.window.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
def notify_axes_change( fig ):
# This will be called whenever the current axes is changed
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver( notify_axes_change )
def _widgetclosed( self ):
if self.window._destroying: return
self.window._destroying = True
Gcf.destroy(self.num)
def _widgetCloseEvent( self, event ):
self._widgetclosed()
qt.QWidget.closeEvent( self.window, event )
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
print "Classic toolbar is not yet supported"
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height)
def destroy( self, *args ):
if self.window._destroying: return
self.window._destroying = True
if self.toolbar: self.toolbar.destroy()
if DEBUG: print "destroy figure manager"
self.window.close(True)
def set_window_title(self, title):
self.window.setCaption(title)
class NavigationToolbar2QT( NavigationToolbar2, qt.QWidget ):
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image_file, callback(str)
toolitems = (
('Home', 'Reset original view', 'home.ppm', 'home'),
('Back', 'Back to previous view','back.ppm', 'back'),
('Forward', 'Forward to next view','forward.ppm', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move.ppm', 'pan'),
('Zoom', 'Zoom to rectangle','zoom_to_rect.ppm', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots','subplots.png', 'configure_subplots'),
('Save', 'Save the figure','filesave.ppm', 'save_figure'),
)
def __init__( self, canvas, parent ):
self.canvas = canvas
self.buttons = {}
qt.QWidget.__init__( self, parent )
# Layout toolbar buttons horizontally.
self.layout = qt.QHBoxLayout( self )
self.layout.setMargin( 2 )
NavigationToolbar2.__init__( self, canvas )
def _init_toolbar( self ):
basedir = os.path.join(matplotlib.rcParams[ 'datapath' ],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text == None:
self.layout.addSpacing( 8 )
continue
fname = os.path.join( basedir, image_file )
image = qt.QPixmap()
image.load( fname )
button = qt.QPushButton( qt.QIconSet( image ), "", self )
qt.QToolTip.add( button, tooltip_text )
self.buttons[ text ] = button
# The automatic layout doesn't look that good - it's too close
# to the images so add a margin around it.
margin = 4
button.setFixedSize( image.width()+margin, image.height()+margin )
qt.QObject.connect( button, qt.SIGNAL( 'clicked()' ),
getattr( self, callback ) )
self.layout.addWidget( button )
self.buttons[ 'Pan' ].setToggleButton( True )
self.buttons[ 'Zoom' ].setToggleButton( True )
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
self.locLabel = qt.QLabel( "", self )
self.locLabel.setAlignment( qt.Qt.AlignRight | qt.Qt.AlignVCenter )
self.locLabel.setSizePolicy(qt.QSizePolicy(qt.QSizePolicy.Ignored,
qt.QSizePolicy.Ignored))
self.layout.addWidget( self.locLabel, 1 )
# reference holder for subplots_adjust window
self.adj_window = None
def destroy( self ):
for text, tooltip_text, image_file, callback in self.toolitems:
if text is not None:
qt.QObject.disconnect( self.buttons[ text ],
qt.SIGNAL( 'clicked()' ),
getattr( self, callback ) )
def pan( self, *args ):
self.buttons[ 'Zoom' ].setOn( False )
NavigationToolbar2.pan( self, *args )
def zoom( self, *args ):
self.buttons[ 'Pan' ].setOn( False )
NavigationToolbar2.zoom( self, *args )
def dynamic_update( self ):
self.canvas.draw()
def set_message( self, s ):
self.locLabel.setText( s )
def set_cursor( self, cursor ):
if DEBUG: print 'Set cursor' , cursor
qt.QApplication.restoreOverrideCursor()
qt.QApplication.setOverrideCursor( qt.QCursor( cursord[cursor] ) )
def draw_rubberband( self, event, x0, y0, x1, y1 ):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [ int(val) for val in (min(x0, x1), min(y0, y1), w, h) ]
self.canvas.drawRectangle( rect )
def configure_subplots(self):
self.adj_window = qt.QMainWindow(None, None, qt.Qt.WDestructiveClose)
win = self.adj_window
win.setCaption("Subplot Configuration Tool")
toolfig = Figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
canvas = self._get_canvas(toolfig)
tool = SubplotTool(self.canvas.figure, toolfig)
centralWidget = qt.QWidget(win)
canvas.reparent(centralWidget, qt.QPoint(0, 0))
win.setCentralWidget(centralWidget)
layout = qt.QVBoxLayout(centralWidget)
layout.addWidget(canvas, 1)
win.resize(w, h)
canvas.setFocus()
win.show()
def _get_canvas(self, fig):
return FigureCanvasQT(fig)
def save_figure( self ):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname = qt.QFileDialog.getSaveFileName(
start, filters, self, "Save image", "Choose a filename to save to",
selectedFilter)
if fname:
try:
self.canvas.print_figure( unicode(fname) )
except Exception, e:
qt.QMessageBox.critical(
self, "Error saving file", str(e),
qt.QMessageBox.Ok, qt.QMessageBox.NoButton)
def set_history_buttons( self ):
canBackward = ( self._views._pos > 0 )
canForward = ( self._views._pos < len( self._views._elements ) - 1 )
self.buttons[ 'Back' ].setEnabled( canBackward )
self.buttons[ 'Forward' ].setEnabled( canForward )
# set icon used when windows are minimized
try:
# TODO: This is badly broken
qt.window_set_default_icon_from_file (
os.path.join( matplotlib.rcParams['datapath'], 'images', 'matplotlib.svg' ) )
except:
verbose.report( 'Could not load matplotlib icon: %s' % sys.exc_info()[1] )
def error_msg_qt( msg, parent=None ):
if not is_string_like( msg ):
msg = ','.join( map( str,msg ) )
qt.QMessageBox.warning( None, "Matplotlib", msg, qt.QMessageBox.Ok )
def exception_handler( type, value, tb ):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename != None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror != None:
msg += value.strerror
else:
msg += str(value)
if len( msg ) : error_msg_qt( msg )
FigureManager = FigureManagerQT
| gpl-3.0 |
Kapeli/PopClip-Extensions | source/OneNote/requests/packages/urllib3/fields.py | 1007 | 5833 | import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
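# Worked examples (illustrative): with a stock ``mimetypes`` database,
# guess_content_type('report.pdf') returns 'application/pdf', while an
# unrecognised extension such as guess_content_type('data.unknownext'), or a
# missing filename, falls back to the 'application/octet-stream' default.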
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
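# Worked examples (illustrative): an ASCII-safe value takes the plain quoted
# form, e.g. format_header_param('name', u'avatar') == 'name="avatar"'.  A
# non-ASCII value such as u'r\xe9sum\xe9.txt' fails the ASCII round-trip and
# is instead emitted through email.utils.encode_rfc2231 as the extended
# "filename*=utf-8''..." percent-encoded parameter described above.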
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
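# Illustrative sketch (not part of the original module): constructing a
# multipart field the way urllib3's form encoding helpers drive this class.
# The field name, filename and file contents are arbitrary example values.
def _example_request_field():
    field = RequestField.from_tuples(
        'attachment', ('notes.txt', 'file contents', 'text/plain'))
    return field.render_headers()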
| mit |
sephii/django | django/contrib/auth/handlers/modwsgi.py | 115 | 1344 | from django.contrib import auth
from django import db
from django.utils.encoding import force_bytes
def check_password(environ, username, password):
"""
Authenticates against Django's auth database
mod_wsgi docs specify None, True, False as return value depending
on whether the user exists and authenticates.
"""
UserModel = auth.get_user_model()
# db connection state is managed similarly to the wsgi handler
# as mod_wsgi may call these functions outside of a request/response cycle
db.reset_queries()
try:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
return None
if not user.is_active:
return None
return user.check_password(password)
finally:
db.close_old_connections()
def groups_for_user(environ, username):
"""
Authorizes a user based on groups
"""
UserModel = auth.get_user_model()
db.reset_queries()
try:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
return []
if not user.is_active:
return []
return [force_bytes(group.name) for group in user.groups.all()]
finally:
db.close_old_connections()
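# Illustrative sketch (not part of the original module): mod_wsgi normally
# calls check_password()/groups_for_user() itself, but they can be exercised
# directly in the same way.  The environ dict, username and password are
# arbitrary example values and assume a configured Django settings module.
def _example_auth_lookup(environ):
    if check_password(environ, 'alice', 's3cret'):
        return groups_for_user(environ, 'alice')
    return []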
| bsd-3-clause |
tushar-dadlani/tsung | docs/conf.py | 12 | 7726 | # -*- coding: utf-8 -*-
#
# Tsung documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 19 12:07:49 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tsung'
copyright = u'2013, Nicolas Niclausse'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.6.0'
# The full version, including alpha/beta/rc tags.
release = '1.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tsungdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Tsung.tex', u'Tsung Documentation',
u'Nicolas Niclausse', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tsung', u'Tsung Documentation',
[u'Nicolas Niclausse'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Tsung', u'Tsung Documentation',
u'Nicolas Niclausse', 'Tsung', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| gpl-2.0 |
josiah-wolf-oberholtzer/supriya | tests/patterns/test_SequencePattern.py | 1 | 1977 | import pytest
from supriya.patterns import SequencePattern
from supriya.patterns.testutils import run_pattern_test
@pytest.mark.parametrize(
"stop_at, sequence, iterations, expected, is_infinite",
[
(None, [1, 2, 3], None, [1, 2, 3], True),
(None, [1, 2, 3], 1, [1, 2, 3], False),
(None, [1, 2, 3], 2, [1, 2, 3, 1, 2, 3], False),
(None, [1, 2, 3, SequencePattern(["a", "b"])], 1, [1, 2, 3, "a", "b"], False),
(
None,
[1, 2, 3, SequencePattern(["a", "b"], None)],
1,
[1, 2, 3, "a", "b"],
True,
),
(
None,
[SequencePattern([1, 2, 3]), SequencePattern(["a", "b"])],
1,
[1, 2, 3, "a", "b"],
False,
),
(
None,
[SequencePattern([1, 2, 3]), SequencePattern(["a", "b"])],
2,
[1, 2, 3, "a", "b", 1, 2, 3, "a", "b"],
False,
),
(
None,
[SequencePattern([1, 2, 3], None), SequencePattern(["a", "b"])],
1,
[1, 2, 3],
True,
),
(
None,
[SequencePattern([1, 2, 3], None), SequencePattern(["a", "b"])],
None,
[1, 2, 3],
True,
),
],
)
def test(stop_at, sequence, iterations, expected, is_infinite):
pattern = SequencePattern(sequence, iterations=iterations)
run_pattern_test(pattern, expected, is_infinite, stop_at)
@pytest.mark.parametrize(
"sequence, iterations, raises",
[
([1, 2, 3], 1, None),
([1, 2, 3], 10, None),
([1, 2, 3], None, None),
([1, 2, 3], 0, ValueError),
(23, 1, ValueError),
],
)
def test___init__(sequence, iterations, raises):
if raises:
with pytest.raises(raises):
SequencePattern(sequence, iterations)
else:
SequencePattern(sequence, iterations)
| mit |
israeltobias/DownMedia | youtube-dl/test/test_youtube_signature.py | 28 | 4025 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import re
import string
from test.helper import FakeYDL
from youtube_dl.extractor import YoutubeIE
from youtube_dl.compat import compat_str, compat_urlretrieve
_TESTS = [
(
'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
'js',
86,
'>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
'js',
85,
'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
'js',
90,
']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl0Cbn9e.js',
'js',
84,
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVW@YZ!"#$%&\'()*+,-./:;<=',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
'js',
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
'js',
84,
'123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
'js',
83,
'123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
'js',
'4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
'82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js',
'js',
'312AA52209E3623129A412D56A40F11CB0AF14AE.3EE09501CB14E3BCDC3B2AE808BF3F1D14E7FBF12',
'112AA5220913623229A412D56A40F11CB0AF14AE.3EE0950FCB14EEBCDC3B2AE808BF331D14E7FBF3',
)
]
class TestSignature(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
if not os.path.exists(self.TESTDATA_DIR):
os.mkdir(self.TESTDATA_DIR)
def make_tfunc(url, stype, sig_input, expected_sig):
m = re.match(r'.*-([a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$', url)
assert m, '%r should follow URL format' % url
test_id = m.group(1)
def test_func(self):
basename = 'player-%s.%s' % (test_id, stype)
fn = os.path.join(self.TESTDATA_DIR, basename)
if not os.path.exists(fn):
compat_urlretrieve(url, fn)
ydl = FakeYDL()
ie = YoutubeIE(ydl)
if stype == 'js':
with io.open(fn, encoding='utf-8') as testf:
jscode = testf.read()
func = ie._parse_sig_js(jscode)
else:
assert stype == 'swf'
with open(fn, 'rb') as testf:
swfcode = testf.read()
func = ie._parse_sig_swf(swfcode)
src_sig = (
compat_str(string.printable[:sig_input])
if isinstance(sig_input, int) else sig_input)
got_sig = func(src_sig)
self.assertEqual(got_sig, expected_sig)
test_func.__name__ = str('test_signature_' + stype + '_' + test_id)
setattr(TestSignature, test_func.__name__, test_func)
for test_spec in _TESTS:
make_tfunc(*test_spec)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
akbargumbira/inasafe | safe/report/impact_report.py | 3 | 24392 | # coding=utf-8
"""Module to generate impact report.
Enable dynamic report generation based on report metadata.
Easily customize map report or document based report.
"""
import imp
import logging
import os
import shutil
from qgis.core import (
QgsComposition,
QgsRectangle,
QgsRasterLayer)
from safe.common.exceptions import (
KeywordNotFoundError)
from safe.definitions.messages import disclaimer
from safe.defaults import (
white_inasafe_logo_path,
black_inasafe_logo_path,
supporters_logo_path,
default_north_arrow_path)
from safe import messaging as m
from safe.messaging import styles
from safe.utilities.i18n import tr
from safe.utilities.keyword_io import KeywordIO
from safe.utilities.utilities import get_error_message
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "[email protected]"
__revision__ = '$Format:%H$'
SUGGESTION_STYLE = styles.GREEN_LEVEL_4_STYLE
WARNING_STYLE = styles.RED_LEVEL_4_STYLE
LOGGER = logging.getLogger('InaSAFE')
class InaSAFEReportContext(object):
"""A class to compile all InaSAFE related context for reporting uses.
.. versionadded:: 4.0
"""
def __init__(self):
"""Create InaSAFE Report context."""
self._black_inasafe_logo = black_inasafe_logo_path()
self._white_inasafe_logo = white_inasafe_logo_path()
# User can change this path in preferences
self._organisation_logo = supporters_logo_path()
self._supporters_logo = supporters_logo_path()
self._north_arrow = default_north_arrow_path()
self._disclaimer = disclaimer()
@property
def north_arrow(self):
"""Getter to north arrow path.
:rtype: str
"""
return self._north_arrow
@north_arrow.setter
def north_arrow(self, north_arrow_path):
"""Set image that will be used as north arrow in reports.
:param north_arrow_path: Path to the north arrow image.
:type north_arrow_path: str
"""
if isinstance(north_arrow_path, basestring) and os.path.exists(
north_arrow_path):
self._north_arrow = north_arrow_path
else:
self._north_arrow = default_north_arrow_path()
@property
def inasafe_logo(self):
"""Getter to safe logo path.
.. versionchanged:: 3.2 - this property is now read only.
:rtype: str
"""
return self.black_inasafe_logo
@property
def black_inasafe_logo(self):
"""Getter to black inasafe logo path.
:rtype: str
"""
return self._black_inasafe_logo
@property
def white_inasafe_logo(self):
"""Getter for white inasafe logo path.
:rtype: str
"""
return self._white_inasafe_logo
@property
def organisation_logo(self):
"""Getter to organisation logo path.
:rtype: str
"""
return self._organisation_logo
@organisation_logo.setter
def organisation_logo(self, logo):
"""Set image that will be used as organisation logo in reports.
:param logo: Path to the organisation logo image.
:type logo: str
"""
if isinstance(logo, basestring) and os.path.exists(logo):
self._organisation_logo = logo
else:
self._organisation_logo = supporters_logo_path()
@property
def supporters_logo(self):
"""Getter to supporters logo path - this is a read only property.
It always returns the InaSAFE supporters logo unlike the organisation
logo which is customisable.
.. versionadded:: 3.2
:rtype: str
"""
return self._supporters_logo
@property
def disclaimer(self):
"""Getter to disclaimer.
:rtype: str
"""
return self._disclaimer
@disclaimer.setter
def disclaimer(self, text):
"""Set text that will be used as disclaimer in reports.
:param text: Disclaimer text
:type text: str
"""
if not isinstance(text, basestring):
self._disclaimer = disclaimer()
else:
self._disclaimer = text
class QGISCompositionContext(object):
"""A class to hold the value for QGISComposition object.
.. versionadded:: 4.0
"""
def __init__(self, extent, map_settings, page_dpi):
"""Create QGISComposition context."""
self._extent = extent
self._map_settings = map_settings
self._page_dpi = page_dpi
self._plot_style = QgsComposition.Print
self._save_as_raster = True
@property
def page_dpi(self):
"""The Page DPI that QGISComposition uses.
        Can be overridden by report metadata.
:rtype: float
"""
return self._page_dpi
@page_dpi.setter
def page_dpi(self, value):
"""Page DPI.
:param value: DPI value for printing
:type value: float
"""
self._page_dpi = value
@property
def extent(self):
"""The extent of the map element.
        This extent is used by the map element to render the extent
        of the layer.
:rtype: QgsRectangle
"""
return self._extent
@extent.setter
def extent(self, value):
"""Extent of map element.
:param value: Extent of map element to display
:type value: QgsRectangle
"""
self._extent = value
@property
def map_settings(self):
"""QgsMapSettings instance that will be used.
Used for QgsComposition
:rtype: qgis.core.QgsMapSettings
"""
return self._map_settings
@map_settings.setter
def map_settings(self, value):
"""QgsMapSettings instance.
:param value: QgsMapSettings for QgsComposition
:type value: qgis.core.QgsMapSettings
"""
self._map_settings = value
@property
def plot_style(self):
"""Constant options for composition rendering style.
Possible values:
- QgsComposition.PlotStyle.Preview
- QgsComposition.PlotStyle.Render
- QgsComposition.PlotStyle.Postscript
:rtype: QgsComposition.PlotStyle
"""
return self._plot_style
@property
def save_as_raster(self):
"""Boolean that indicates the composition will be saved as Raster.
:rtype: bool
"""
return self._save_as_raster
class ImpactReport(object):
"""A class for creating and generating report.
.. versionadded:: 4.0
"""
# constant for default PAGE_DPI settings
DEFAULT_PAGE_DPI = 300
REPORT_GENERATION_SUCCESS = 0
REPORT_GENERATION_FAILED = 1
class LayerException(Exception):
"""Class for Layer Exception.
Raised if layer being used is not valid.
"""
pass
def __init__(
self,
iface,
template_metadata,
impact_function=None,
hazard=None,
exposure=None,
impact=None,
analysis=None,
exposure_summary_table=None,
aggregation_summary=None,
extra_layers=None,
minimum_needs_profile=None):
"""Constructor for the Composition Report class.
:param iface: Reference to the QGIS iface object.
:type iface: QgsAppInterface
:param template_metadata: InaSAFE template metadata.
:type template_metadata: ReportMetadata
:param impact_function: Impact function instance for the report
:type impact_function:
safe.impact_function.impact_function.ImpactFunction
.. versionadded:: 4.0
"""
LOGGER.debug('InaSAFE Impact Report class initialised')
self._iface = iface
self._metadata = template_metadata
self._output_folder = None
self._impact_function = impact_function
self._hazard = hazard or self._impact_function.hazard
self._exposure = (
exposure or self._impact_function.exposure)
self._impact = (
impact or self._impact_function.impact)
self._analysis = (analysis or self._impact_function.analysis_impacted)
self._exposure_summary_table = (
exposure_summary_table or
self._impact_function.exposure_summary_table)
self._aggregation_summary = (
aggregation_summary or
self._impact_function.aggregation_summary)
if extra_layers is None:
extra_layers = []
self._extra_layers = extra_layers
self._minimum_needs = minimum_needs_profile
self._extent = self._iface.mapCanvas().extent()
self._inasafe_context = InaSAFEReportContext()
# QgsMapSettings is added in 2.4
map_settings = self._iface.mapCanvas().mapSettings()
self._qgis_composition_context = QGISCompositionContext(
None,
map_settings,
ImpactReport.DEFAULT_PAGE_DPI)
self._keyword_io = KeywordIO()
@property
def inasafe_context(self):
"""Reference to default InaSAFE Context.
:rtype: InaSAFEReportContext
"""
return self._inasafe_context
@property
def qgis_composition_context(self):
"""Reference to default QGIS Composition Context.
:rtype: QGISCompositionContext
"""
return self._qgis_composition_context
@property
def metadata(self):
"""Getter to the template.
:return: ReportMetadata
:rtype: safe.report.report_metadata.ReportMetadata
"""
return self._metadata
@property
def output_folder(self):
"""Output folder path for the rendering.
:rtype: str
"""
return self._output_folder
@output_folder.setter
def output_folder(self, value):
"""Output folder path for the rendering.
:param value: output folder path
:type value: str
"""
self._output_folder = value
if not os.path.exists(self._output_folder):
os.makedirs(self._output_folder)
@staticmethod
def absolute_output_path(
output_folder, components, component_key):
"""Return absolute output path of component.
:param output_folder: The base output folder
:type output_folder: str
:param components: The list of components to look up
:type components: list[ReportMetadata]
:param component_key: The component key
:type component_key: str
:return: absolute output path
:rtype: str
.. versionadded:: 4.0
"""
comp_keys = [c.key for c in components]
if component_key in comp_keys:
idx = comp_keys.index(component_key)
output_path = components[idx].output_path
if isinstance(output_path, str):
return os.path.abspath(
os.path.join(output_folder, output_path))
elif isinstance(output_path, list):
output_list = []
for path in output_path:
output_list.append(os.path.abspath(
os.path.join(output_folder, path)))
return output_list
elif isinstance(output_path, dict):
output_dict = {}
for key, path in output_path.iteritems():
output_dict[key] = os.path.abspath(
os.path.join(output_folder, path))
return output_dict
return None
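    # Worked example (illustrative; paths and keys are hypothetical): if a
    # component with key 'impact-report' declares output_path
    # 'output/impact.pdf', then absolute_output_path('/tmp/report',
    # components, 'impact-report') resolves to
    # '/tmp/report/output/impact.pdf'; list and dict output paths are
    # resolved entry by entry in the same way.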
def component_absolute_output_path(self, component_key):
"""Return absolute output path of component.
:param component_key: The component key
:type component_key: str
:return: absolute output path
:rtype: str
.. versionadded:: 4.0
"""
return ImpactReport.absolute_output_path(
self.output_folder,
self.metadata.components,
component_key)
@property
def impact_function(self):
"""Getter for impact function instance to use.
:rtype: safe.impact_function.impact_function.ImpactFunction
"""
return self._impact_function
def _check_layer_count(self, layer):
"""Check for the validity of the layer.
:param layer: QGIS layer
:type layer: qgis.core.QgsVectorLayer
:return:
"""
if layer:
if not layer.isValid():
raise ImpactReport.LayerException('Layer is not valid')
if isinstance(layer, QgsRasterLayer):
# can't check feature count of raster layer
return
feature_count = len([f for f in layer.getFeatures()])
if feature_count == 0:
raise ImpactReport.LayerException(
'Layer contains no features')
@property
def hazard(self):
"""Getter to hazard layer.
:rtype: qgis.core.QgsVectorLayer
"""
self._check_layer_count(self._hazard)
return self._hazard
@hazard.setter
def hazard(self, layer):
"""Hazard layer.
:param layer: hazard layer
:type layer: qgis.core.QgsVectorLayer
"""
self._hazard = layer
@property
def exposure(self):
"""Getter to exposure layer.
:rtype: qgis.core.QgsVectorLayer
"""
self._check_layer_count(self._exposure)
return self._exposure
@exposure.setter
def exposure(self, layer):
"""Exposure layer.
:param layer: exposure layer
:type layer: qgis.core.QgsVectorLayer
"""
        self._exposure = layer
@property
def impact(self):
"""Getter to layer that will be used for stats, legend, reporting.
:rtype: qgis.core.QgsVectorLayer
"""
self._check_layer_count(self._impact)
return self._impact
@impact.setter
def impact(self, layer):
"""Set the layer that will be used for stats, legend and reporting.
:param layer: Layer that will be used for stats, legend and reporting.
:type layer: qgis.core.QgsVectorLayer
"""
self._impact = layer
@property
def analysis(self):
"""Analysis layer.
:rtype: qgis.core.QgsVectorLayer
"""
self._check_layer_count(self._analysis)
return self._analysis
@analysis.setter
def analysis(self, layer):
"""Analysis layer.
:param layer: Analysis layer
:type layer: qgis.core.QgsVectorLayer
"""
self._analysis = layer
@property
def exposure_summary_table(self):
"""Exposure summary table.
:rtype: qgis.core.QgsVectorLayer
"""
# self._check_layer_count(self._exposure_summary_table)
return self._exposure_summary_table
@exposure_summary_table.setter
def exposure_summary_table(self, value):
"""Exposure summary table.
:param value: Exposure Summary Table
:type value: qgis.core.QgsVectorLayer
:return:
"""
self._exposure_summary_table = value
@property
def aggregation_summary(self):
"""Aggregation summary.
:rtype: qgis.core.QgsVectorLayer
"""
self._check_layer_count(self._aggregation_summary)
return self._aggregation_summary
@aggregation_summary.setter
def aggregation_summary(self, value):
"""Aggregation summary.
:param value: Aggregation Summary
:type value: qgis.core.QgsVectorLayer
"""
self._aggregation_summary = value
@property
def extra_layers(self):
"""Getter to extra layers.
extra layers will be rendered alongside impact layer
"""
return self._extra_layers
@extra_layers.setter
def extra_layers(self, extra_layers):
"""Set extra layers.
extra layers will be rendered alongside impact layer
:param extra_layers: List of QgsMapLayer
:type extra_layers: list(QgsMapLayer)
"""
self._extra_layers = extra_layers
@property
def minimum_needs(self):
"""Minimum needs.
:return: minimum needs used in impact report
:rtype: safe.gui.tools.minimum_needs.needs_profile.NeedsProfile
"""
return self._minimum_needs
@minimum_needs.setter
def minimum_needs(self, value):
"""Minimum needs.
:param value: minimum needs used in impact report
:type value: safe.gui.tools.minimum_needs.needs_profile.NeedsProfile
"""
self._minimum_needs = value
@property
def map_title(self):
"""Get the map title from the layer keywords if possible.
:returns: None on error, otherwise the title.
:rtype: None, str
"""
# noinspection PyBroadException
try:
title = self._keyword_io.read_keywords(
self.impact, 'map_title')
return title
except KeywordNotFoundError:
return None
except Exception: # pylint: disable=broad-except
return None
@property
def map_legend_attributes(self):
"""Get the map legend attribute from the layer keywords if possible.
:returns: None on error, otherwise the attributes (notes and units).
:rtype: None, str
"""
LOGGER.debug('InaSAFE Map getMapLegendAttributes called')
legend_attribute_list = [
'legend_notes',
'legend_units',
'legend_title']
legend_attribute_dict = {}
for legend_attribute in legend_attribute_list:
# noinspection PyBroadException
try:
legend_attribute_dict[legend_attribute] = \
self._keyword_io.read_keywords(
self.impact, legend_attribute)
except KeywordNotFoundError:
pass
except Exception: # pylint: disable=broad-except
pass
return legend_attribute_dict
def process_components(self):
"""Process context for each component and a given template.
:returns: Tuple of error code and message
:type: tuple
.. versionadded:: 4.0
"""
message = m.Message()
warning_heading = m.Heading(
tr('Report Generation issue'), **WARNING_STYLE)
message.add(warning_heading)
failed_extract_context = m.Heading(tr(
'Failed to extract context'), **WARNING_STYLE)
failed_render_context = m.Heading(tr(
'Failed to render context'), **WARNING_STYLE)
failed_find_extractor = m.Heading(tr(
'Failed to load extractor method'), **WARNING_STYLE)
failed_find_renderer = m.Heading(tr(
'Failed to load renderer method'), **WARNING_STYLE)
generation_error_code = self.REPORT_GENERATION_SUCCESS
for component in self.metadata.components:
# load extractors
try:
if not component.context:
if callable(component.extractor):
_extractor_method = component.extractor
else:
_package_name = (
'%(report-key)s.extractors.%(component-key)s')
_package_name %= {
'report-key': self.metadata.key,
'component-key': component.key
}
# replace dash with underscores
_package_name = _package_name.replace('-', '_')
_extractor_path = os.path.join(
self.metadata.template_folder,
component.extractor
)
_module = imp.load_source(
_package_name, _extractor_path)
_extractor_method = getattr(_module, 'extractor')
else:
LOGGER.info('Predefined context. Extractor not needed.')
except Exception as e: # pylint: disable=broad-except
generation_error_code = self.REPORT_GENERATION_FAILED
LOGGER.info(e)
if self.impact_function.debug_mode:
raise
else:
message.add(failed_find_extractor)
message.add(component.info)
message.add(get_error_message(e))
continue
# method signature:
# - this ImpactReport
# - this component
try:
if not component.context:
context = _extractor_method(self, component)
component.context = context
else:
LOGGER.info('Using predefined context.')
except Exception as e: # pylint: disable=broad-except
generation_error_code = self.REPORT_GENERATION_FAILED
LOGGER.info(e)
if self.impact_function.debug_mode:
raise
else:
message.add(failed_extract_context)
message.add(get_error_message(e))
continue
try:
# load processor
if callable(component.processor):
_renderer = component.processor
else:
_package_name = '%(report-key)s.renderer.%(component-key)s'
_package_name %= {
'report-key': self.metadata.key,
'component-key': component.key
}
# replace dash with underscores
_package_name = _package_name.replace('-', '_')
_renderer_path = os.path.join(
self.metadata.template_folder,
component.processor
)
_module = imp.load_source(_package_name, _renderer_path)
_renderer = getattr(_module, 'renderer')
except Exception as e: # pylint: disable=broad-except
generation_error_code = self.REPORT_GENERATION_FAILED
LOGGER.info(e)
if self.impact_function.debug_mode:
raise
else:
message.add(failed_find_renderer)
message.add(component.info)
message.add(get_error_message(e))
continue
# method signature:
# - this ImpactReport
# - this component
if component.context:
try:
output = _renderer(self, component)
output_path = self.component_absolute_output_path(
component.key)
if isinstance(output_path, dict):
try:
dirname = os.path.dirname(output_path.get('doc'))
                        except Exception:  # pylint: disable=broad-except
dirname = os.path.dirname(output_path.get('map'))
else:
dirname = os.path.dirname(output_path)
if component.resources:
for resource in component.resources:
target_resource = os.path.basename(resource)
target_dir = os.path.join(
dirname, 'resources', target_resource)
# copy here
shutil.copytree(resource, target_dir)
component.output = output
except Exception as e: # pylint: disable=broad-except
generation_error_code = self.REPORT_GENERATION_FAILED
LOGGER.info(e)
if self.impact_function.debug_mode:
raise
else:
message.add(failed_render_context)
message.add(get_error_message(e))
continue
return generation_error_code, message
| gpl-3.0 |
harshaneelhg/scikit-learn | sklearn/manifold/spectral_embedding_.py | 128 | 19845 | """Spectral Embedding"""
# Author: Gael Varoquaux <[email protected]>
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import lobpcg
from ..base import BaseEstimator
from ..externals import six
from ..utils import check_random_state, check_array, check_symmetric
from ..utils.extmath import _deterministic_vector_sign_flip
from ..utils.graph import graph_laplacian
from ..utils.sparsetools import connected_components
from ..utils.arpack import eigsh
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components_matrix : array-like, shape: (n_samples,)
An array of bool value indicating the indexes of the nodes
belonging to the largest connected components of the given query
node
"""
connected_components_matrix = np.zeros(
shape=(graph.shape[0]), dtype=np.bool)
connected_components_matrix[node_id] = True
n_node = graph.shape[0]
for i in range(n_node):
last_num_component = connected_components_matrix.sum()
_, node_to_add = np.where(graph[connected_components_matrix] != 0)
connected_components_matrix[node_to_add] = True
if last_num_component >= connected_components_matrix.sum():
break
return connected_components_matrix
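# Illustrative behaviour (added sketch, not part of the original module): for a
# dense adjacency matrix of two disjoint edges,
#   graph = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
#   _graph_connected_component(graph, 0)
# returns [True, True, False, False], i.e. only the component containing node 0.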
def _graph_is_connected(graph):
""" Return whether the graph is connected (True) or Not (False)
Parameters
----------
graph : array-like or sparse matrix, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not
"""
if sparse.isspmatrix(graph):
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
def _set_diag(laplacian, value):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition
Parameters
----------
laplacian : array or sparse matrix
The graph laplacian
value : float
The value of the diagonal
Returns
-------
laplacian : array or sparse matrix
        An array or matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
    # We need to set all entries in the diagonal to the given value
if not sparse.isspmatrix(laplacian):
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
random_state=None, eigen_tol=0.0,
norm_laplacian=True, drop_first=True):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However care must be taken to always make the affinity matrix symmetric
so that the eigenvector decomposition works as expected.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : integer, optional, default 8
The dimension of the projection subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigenvectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
eigen_tol : float, optional, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
drop_first : bool, optional, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
norm_laplacian : bool, optional, default=True
If True, then compute normalized Laplacian.
Returns
-------
embedding : array, shape=(n_samples, n_components)
The reduced samples.
Notes
-----
Spectral embedding is most useful when the graph has one connected
    component. If the graph has many components, the first few eigenvectors
will simply uncover the connected components of the graph.
References
----------
* http://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
Andrew V. Knyazev
http://dx.doi.org/10.1137%2FS1064827500366124
"""
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
if eigen_solver == "amg":
raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
"not available.")
if eigen_solver is None:
eigen_solver = 'arpack'
elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
raise ValueError("Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'"
% eigen_solver)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn("Graph is not fully connected, spectral embedding"
" may not work as expected.")
laplacian, dd = graph_laplacian(adjacency,
normed=norm_laplacian, return_diag=True)
if (eigen_solver == 'arpack'
or eigen_solver != 'lobpcg' and
(not sparse.isspmatrix(laplacian)
or n_nodes < 5 * n_components)):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1)
# Here we'll use shift-invert mode for fast eigenvalues
# (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
lambdas, diffusion_map = eigsh(laplacian, k=n_components,
sigma=1.0, which='LM',
tol=eigen_tol)
embedding = diffusion_map.T[n_components::-1] * dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
if eigen_solver == 'amg':
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64,
accept_sparse=True)
laplacian = _set_diag(laplacian, 1)
ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
M = ml.aspreconditioner()
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
largest=False)
embedding = diffusion_map.T * dd
if embedding.shape[0] == 1:
raise ValueError
elif eigen_solver == "lobpcg":
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64,
accept_sparse=True)
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
lambdas, diffusion_map = eigh(laplacian)
embedding = diffusion_map.T[:n_components] * dd
else:
laplacian = _set_diag(laplacian, 1)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
largest=False, maxiter=2000)
embedding = diffusion_map.T[:n_components] * dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
class SpectralEmbedding(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
-----------
n_components : integer, default: 2
The dimension of the projected subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None, default : None
A pseudo random number generator used for the initialization of the
lobpcg eigenvectors decomposition when eigen_solver == 'amg'.
affinity : string or callable, default : "nearest_neighbors"
How to construct the affinity matrix.
- 'nearest_neighbors' : construct affinity matrix by knn graph
- 'rbf' : construct affinity matrix by rbf kernel
- 'precomputed' : interpret X as precomputed affinity matrix
- callable : use passed in function as affinity
the function takes in data matrix (n_samples, n_features)
and return affinity matrix (n_samples, n_samples).
gamma : float, optional, default : 1/n_features
Kernel coefficient for rbf kernel.
n_neighbors : int, default : max(n_samples/10 , 1)
Number of nearest neighbors for nearest_neighbors graph building.
Attributes
----------
embedding_ : array, shape = (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : array, shape = (n_samples, n_samples)
Affinity_matrix constructed from samples or precomputed.
References
----------
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- On Spectral Clustering: Analysis and an algorithm, 2011
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
"""
def __init__(self, n_components=2, affinity="nearest_neighbors",
gamma=None, random_state=None, eigen_solver=None,
n_neighbors=None):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.n_neighbors = n_neighbors
@property
def _pairwise(self):
return self.affinity == "precomputed"
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
affinity_matrix, shape (n_samples, n_samples)
"""
if self.affinity == 'precomputed':
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == 'nearest_neighbors':
if sparse.issparse(X):
warnings.warn("Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity")
self.affinity = "rbf"
else:
self.n_neighbors_ = (self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1))
self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_,
include_self=True)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
self.affinity_matrix_.T)
return self.affinity_matrix_
if self.affinity == 'rbf':
self.gamma_ = (self.gamma
if self.gamma is not None else 1.0 / X.shape[1])
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
if isinstance(self.affinity, six.string_types):
if self.affinity not in set(("nearest_neighbors", "rbf",
"precomputed")):
raise ValueError(("%s is not a valid affinity. Expected "
"'precomputed', 'rbf', 'nearest_neighbors' "
"or a callable.") % self.affinity)
elif not callable(self.affinity):
raise ValueError(("'affinity' is expected to be an an affinity "
"name or a callable. Got: %s") % self.affinity)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = spectral_embedding(affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self.fit(X)
return self.embedding_
| bsd-3-clause |
ltilve/chromium | third_party/mojo/src/mojo/public/tools/bindings/pylib/mojom_tests/generate/data_unittest.py | 10 | 5222 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import os.path
import sys
import unittest
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
imp.find_module("mojom")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("pylib"), "pylib"))
from mojom.generate import data
from mojom.generate import module as mojom
class DataTest(unittest.TestCase):
def testStructDataConversion(self):
"""Tests that a struct can be converted from data."""
module = mojom.Module('test_module', 'test_namespace')
struct_data = {
'name': 'SomeStruct',
'enums': [],
'constants': [],
'fields': [
{'name': 'field1', 'kind': 'i32'},
{'name': 'field2', 'kind': 'i32', 'ordinal': 10},
{'name': 'field3', 'kind': 'i32', 'default': 15}]}
struct = data.StructFromData(module, struct_data)
struct.fields = map(lambda field:
data.FieldFromData(module, field, struct), struct.fields_data)
self.assertEquals(struct_data, data.StructToData(struct))
def testUnionDataConversion(self):
"""Tests that a union can be converted from data."""
module = mojom.Module('test_module', 'test_namespace')
union_data = {
'name': 'SomeUnion',
'fields': [
{'name': 'field1', 'kind': 'i32'},
{'name': 'field2', 'kind': 'i32', 'ordinal': 10}]}
union = data.UnionFromData(module, union_data)
union.fields = map(lambda field:
data.FieldFromData(module, field, union), union.fields_data)
self.assertEquals(union_data, data.UnionToData(union))
def testImportFromDataNoMissingImports(self):
"""Tests that unions, structs, interfaces and enums are imported."""
module = mojom.Module('test_module', 'test_namespace')
imported_module = mojom.Module('import_module', 'import_namespace')
#TODO(azani): Init values in module.py.
#TODO(azani): Test that values are imported.
imported_module.values = {}
imported_data = {'module' : imported_module}
struct = mojom.Struct('TestStruct', module=module)
imported_module.kinds[struct.spec] = struct
union = mojom.Union('TestUnion', module=module)
imported_module.kinds[union.spec] = union
interface = mojom.Interface('TestInterface', module=module)
imported_module.kinds[interface.spec] = interface
enum = mojom.Enum('TestEnum', module=module)
imported_module.kinds[enum.spec] = enum
data.ImportFromData(module, imported_data)
# Test that the kind was imported.
self.assertIn(struct.spec, module.kinds)
self.assertEquals(struct.name, module.kinds[struct.spec].name)
self.assertIn(union.spec, module.kinds)
self.assertEquals(union.name, module.kinds[union.spec].name)
self.assertIn(interface.spec, module.kinds)
self.assertEquals(interface.name, module.kinds[interface.spec].name)
self.assertIn(enum.spec, module.kinds)
self.assertEquals(enum.name, module.kinds[enum.spec].name)
# Test that the imported kind is a copy and not the original.
self.assertIsNot(struct, module.kinds[struct.spec])
self.assertIsNot(union, module.kinds[union.spec])
self.assertIsNot(interface, module.kinds[interface.spec])
self.assertIsNot(enum, module.kinds[enum.spec])
def testImportFromDataNoExtraneousImports(self):
"""Tests that arrays, maps and interface requests are not imported."""
module = mojom.Module('test_module', 'test_namespace')
imported_module = mojom.Module('import_module', 'import_namespace')
#TODO(azani): Init values in module.py.
imported_module.values = {}
imported_data = {'module' : imported_module}
array = mojom.Array(mojom.INT16, length=20)
imported_module.kinds[array.spec] = array
map_kind = mojom.Map(mojom.INT16, mojom.INT16)
imported_module.kinds[map_kind.spec] = map_kind
interface = mojom.Interface('TestInterface', module=module)
imported_module.kinds[interface.spec] = interface
interface_req = mojom.InterfaceRequest(interface)
imported_module.kinds[interface_req.spec] = interface_req
data.ImportFromData(module, imported_data)
self.assertNotIn(array.spec, module.kinds)
self.assertNotIn(map_kind.spec, module.kinds)
self.assertNotIn(interface_req.spec, module.kinds)
def testNonInterfaceAsInterfaceRequest(self):
"""Tests that a non-interface cannot be used for interface requests."""
module = mojom.Module('test_module', 'test_namespace')
interface = mojom.Interface('TestInterface', module=module)
method_dict = {
'name': 'Foo',
'parameters': [{'name': 'foo', 'kind': 'r:i32'}],
}
with self.assertRaises(Exception) as e:
data.MethodFromData(module, method_dict, interface)
self.assertEquals(e.exception.__str__(),
'Interface request requires \'i32\' to be an interface.')
| bsd-3-clause |
apocquet/django | tests/m2o_recursive/tests.py | 419 | 1724 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Category, Person
class ManyToOneRecursiveTests(TestCase):
def setUp(self):
self.r = Category(id=None, name='Root category', parent=None)
self.r.save()
self.c = Category(id=None, name='Child category', parent=self.r)
self.c.save()
def test_m2o_recursive(self):
self.assertQuerysetEqual(self.r.child_set.all(),
['<Category: Child category>'])
self.assertEqual(self.r.child_set.get(name__startswith='Child').id, self.c.id)
self.assertEqual(self.r.parent, None)
self.assertQuerysetEqual(self.c.child_set.all(), [])
self.assertEqual(self.c.parent.id, self.r.id)
class MultipleManyToOneRecursiveTests(TestCase):
def setUp(self):
self.dad = Person(full_name='John Smith Senior', mother=None, father=None)
self.dad.save()
self.mom = Person(full_name='Jane Smith', mother=None, father=None)
self.mom.save()
self.kid = Person(full_name='John Smith Junior', mother=self.mom, father=self.dad)
self.kid.save()
def test_m2o_recursive2(self):
self.assertEqual(self.kid.mother.id, self.mom.id)
self.assertEqual(self.kid.father.id, self.dad.id)
self.assertQuerysetEqual(self.dad.fathers_child_set.all(),
['<Person: John Smith Junior>'])
self.assertQuerysetEqual(self.mom.mothers_child_set.all(),
['<Person: John Smith Junior>'])
self.assertQuerysetEqual(self.kid.mothers_child_set.all(), [])
self.assertQuerysetEqual(self.kid.fathers_child_set.all(), [])
| bsd-3-clause |
baryonix/collectd | contrib/network-proxy.py | 105 | 1677 | #!/usr/bin/env python
# vim: sts=4 sw=4 et
# Simple unicast proxy to send collectd traffic to another host/port.
# Copyright (C) 2007 Pavel Shramov <shramov at mexmat.net>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; only version 2 of the License is applicable.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
"""
Simple unicast proxy for collectd (>= 4.0).
Binds to 'local' address and forwards all traffic to 'remote'.
"""
import socket
import struct
""" Local multicast group/port"""
local = ("239.192.74.66", 25826)
""" Address to send packets """
remote = ("grid.pp.ru", 35826)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
mreq = struct.pack("4sl", socket.inet_aton(local[0]), socket.INADDR_ANY)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
sock.bind(local)
out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
if __name__ == "__main__":
while True:
(buf, addr) = sock.recvfrom(2048)
        out.sendto(buf, remote)
| gpl-2.0 |
wimnat/ansible | test/lib/ansible_test/_data/collection_detail.py | 40 | 3000 | """Retrieve collection detail."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import re
import sys
import yaml
# See semantic versioning specification (https://semver.org/)
NUMERIC_IDENTIFIER = r'(?:0|[1-9][0-9]*)'
ALPHANUMERIC_IDENTIFIER = r'(?:[0-9]*[a-zA-Z-][a-zA-Z0-9-]*)'
PRE_RELEASE_IDENTIFIER = r'(?:' + NUMERIC_IDENTIFIER + r'|' + ALPHANUMERIC_IDENTIFIER + r')'
BUILD_IDENTIFIER = r'[a-zA-Z0-9-]+' # equivalent to r'(?:[0-9]+|' + ALPHANUMERIC_IDENTIFIER + r')'
VERSION_CORE = NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER
PRE_RELEASE = r'(?:-' + PRE_RELEASE_IDENTIFIER + r'(?:\.' + PRE_RELEASE_IDENTIFIER + r')*)?'
BUILD = r'(?:\+' + BUILD_IDENTIFIER + r'(?:\.' + BUILD_IDENTIFIER + r')*)?'
SEMVER_REGULAR_EXPRESSION = r'^' + VERSION_CORE + PRE_RELEASE + BUILD + r'$'
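# Illustrative matches (added note, not part of the original file): the anchored
# pattern accepts '1.0.0', '1.2.3-beta.1' and '1.2.3+build.5', while rejecting
# '1.0' (missing patch), 'v1.0.0' (leading v) and '1.02.0' (leading zero).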
def validate_version(version):
"""Raise exception if the provided version is not None or a valid semantic version."""
if version is None:
return
if not re.match(SEMVER_REGULAR_EXPRESSION, version):
raise Exception('Invalid version number "{0}". Collection version numbers must '
'follow semantic versioning (https://semver.org/).'.format(version))
def read_manifest_json(collection_path):
"""Return collection information from the MANIFEST.json file."""
manifest_path = os.path.join(collection_path, 'MANIFEST.json')
if not os.path.exists(manifest_path):
return None
try:
with open(manifest_path) as manifest_file:
manifest = json.load(manifest_file)
collection_info = manifest.get('collection_info') or dict()
result = dict(
version=collection_info.get('version'),
)
validate_version(result['version'])
except Exception as ex: # pylint: disable=broad-except
raise Exception('{0}: {1}'.format(os.path.basename(manifest_path), ex))
return result
def read_galaxy_yml(collection_path):
"""Return collection information from the galaxy.yml file."""
galaxy_path = os.path.join(collection_path, 'galaxy.yml')
if not os.path.exists(galaxy_path):
return None
try:
with open(galaxy_path) as galaxy_file:
galaxy = yaml.safe_load(galaxy_file)
result = dict(
version=galaxy.get('version'),
)
validate_version(result['version'])
except Exception as ex: # pylint: disable=broad-except
raise Exception('{0}: {1}'.format(os.path.basename(galaxy_path), ex))
return result
def main():
"""Retrieve collection detail."""
collection_path = sys.argv[1]
try:
result = read_manifest_json(collection_path) or read_galaxy_yml(collection_path) or dict()
except Exception as ex: # pylint: disable=broad-except
result = dict(
error='{0}'.format(ex),
)
print(json.dumps(result))
if __name__ == '__main__':
main()
| gpl-3.0 |
alphafoobar/intellij-community | python/lib/Lib/site-packages/django/contrib/localflavor/it/util.py | 436 | 1807 | from django.utils.encoding import smart_str, smart_unicode
def ssn_check_digit(value):
"Calculate Italian social security number check digit."
ssn_even_chars = {
'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
'9': 9, 'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5, 'G': 6, 'H': 7,
'I': 8, 'J': 9, 'K': 10, 'L': 11, 'M': 12, 'N': 13, 'O': 14, 'P': 15,
'Q': 16, 'R': 17, 'S': 18, 'T': 19, 'U': 20, 'V': 21, 'W': 22, 'X': 23,
'Y': 24, 'Z': 25
}
ssn_odd_chars = {
'0': 1, '1': 0, '2': 5, '3': 7, '4': 9, '5': 13, '6': 15, '7': 17, '8':
19, '9': 21, 'A': 1, 'B': 0, 'C': 5, 'D': 7, 'E': 9, 'F': 13, 'G': 15,
'H': 17, 'I': 19, 'J': 21, 'K': 2, 'L': 4, 'M': 18, 'N': 20, 'O': 11,
'P': 3, 'Q': 6, 'R': 8, 'S': 12, 'T': 14, 'U': 16, 'V': 10, 'W': 22,
'X': 25, 'Y': 24, 'Z': 23
}
# Chars from 'A' to 'Z'
ssn_check_digits = [chr(x) for x in range(65, 91)]
ssn = value.upper()
total = 0
for i in range(0, 15):
try:
if i % 2 == 0:
total += ssn_odd_chars[ssn[i]]
else:
total += ssn_even_chars[ssn[i]]
except KeyError:
msg = "Character '%(char)s' is not allowed." % {'char': ssn[i]}
raise ValueError(msg)
return ssn_check_digits[total % 26]
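# Worked sketch (illustrative, not part of the original module): for the first
# 15 characters of a fiscal code, characters in 1-based odd positions (0-based
# even indexes) are looked up in ssn_odd_chars and the others in ssn_even_chars;
# e.g. a leading 'M' contributes 18 from ssn_odd_chars, and the running total
# modulo 26 selects the control letter from 'A'-'Z'.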
def vat_number_check_digit(vat_number):
"Calculate Italian VAT number check digit."
normalized_vat_number = smart_str(vat_number).zfill(10)
total = 0
for i in range(0, 10, 2):
total += int(normalized_vat_number[i])
for i in range(1, 11, 2):
        quotient, remainder = divmod(int(normalized_vat_number[i]) * 2, 10)
total += quotient + remainder
return smart_unicode((10 - total % 10) % 10)
| apache-2.0 |
SpectreJan/gnuradio | gr-digital/python/digital/test_soft_decisions.py | 2 | 4908 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy, pylab, sys
from gnuradio import digital
from soft_dec_lut_gen import soft_dec_table, calc_soft_dec_from_table, calc_soft_dec
from psk_constellations import psk_4_0, psk_4_1, psk_4_2, psk_4_3, psk_4_4, psk_4_5, psk_4_6, psk_4_7, sd_psk_4_0, sd_psk_4_1, sd_psk_4_2, sd_psk_4_3, sd_psk_4_4, sd_psk_4_5, sd_psk_4_6, sd_psk_4_7
from qam_constellations import qam_16_0, sd_qam_16_0
def test_qpsk(i, sample, prec):
qpsk_const_list = [psk_4_0, psk_4_1, psk_4_2, psk_4_3,
psk_4_4, psk_4_5, psk_4_6, psk_4_7]
qpsk_lut_gen_list = [sd_psk_4_0, sd_psk_4_1, sd_psk_4_2, sd_psk_4_3,
sd_psk_4_4, sd_psk_4_5, sd_psk_4_6, sd_psk_4_7]
constel, code = qpsk_const_list[i]()
qpsk_lut_gen = qpsk_lut_gen_list[i]
rot_sym = 1
side = 2
width = 2
c = digital.constellation_rect(constel, code, rot_sym,
side, side, width, width)
# Get max energy/symbol in constellation
constel = c.points()
Es = max([numpy.sqrt(constel_i.real**2 + constel_i.imag**2) for constel_i in constel])
#table = soft_dec_table_generator(qpsk_lut_gen, prec, Es)
table = soft_dec_table(constel, code, prec)
c.gen_soft_dec_lut(prec)
#c.set_soft_dec_lut(table, prec)
y_python_gen_calc = qpsk_lut_gen(sample, Es)
y_python_table = calc_soft_dec_from_table(sample, table, prec, Es)
y_python_raw_calc = calc_soft_dec(sample, constel, code)
y_cpp_table = c.soft_decision_maker(sample)
y_cpp_raw_calc = c.calc_soft_dec(sample)
return (y_python_gen_calc, y_python_table, y_python_raw_calc,
y_cpp_table, y_cpp_raw_calc, constel, code, c)
def test_qam16(i, sample, prec):
sample = sample/1
qam_const_list = [qam_16_0, ]
qam_lut_gen_list = [sd_qam_16_0, ]
constel, code = qam_const_list[i]()
qam_lut_gen = qam_lut_gen_list[i]
rot_sym = 4
side = 2
width = 2
c = digital.constellation_rect(constel, code, rot_sym,
side, side, width, width)
# Get max energy/symbol in constellation
constel = c.points()
Es = max([abs(constel_i) for constel_i in constel])
#table = soft_dec_table_generator(qam_lut_gen, prec, Es)
table = soft_dec_table(constel, code, prec, 1)
#c.gen_soft_dec_lut(prec)
c.set_soft_dec_lut(table, prec)
y_python_gen_calc = qam_lut_gen(sample, Es)
y_python_table = calc_soft_dec_from_table(sample, table, prec, Es)
y_python_raw_calc = calc_soft_dec(sample, constel, code, 1)
y_cpp_table = c.soft_decision_maker(sample)
y_cpp_raw_calc = c.calc_soft_dec(sample)
return (y_python_gen_calc, y_python_table, y_python_raw_calc,
y_cpp_table, y_cpp_raw_calc, constel, code, c)
if __name__ == "__main__":
index = 0
prec = 8
x_re = 2*numpy.random.random()-1
x_im = 2*numpy.random.random()-1
x = x_re + x_im*1j
#x = -1 + -0.j
if 1:
y_python_gen_calc, y_python_table, y_python_raw_calc, \
y_cpp_table, y_cpp_raw_calc, constel, code, c \
= test_qpsk(index, x, prec)
else:
y_python_gen_calc, y_python_table, y_python_raw_calc, \
y_cpp_table, y_cpp_raw_calc, constel, code, c \
= test_qam16(index, x, prec)
k = numpy.log2(len(constel))
print "Sample: ", x
print "Python Generator Calculated: ", (y_python_gen_calc)
print "Python Generator Table: ", (y_python_table)
print "Python Raw calc: ", (y_python_raw_calc)
print "C++ Table calc: ", (y_cpp_table)
print "C++ Raw calc: ", (y_cpp_raw_calc)
fig = pylab.figure(1)
sp1 = fig.add_subplot(1,1,1)
sp1.plot([c.real for c in constel],
[c.imag for c in constel], 'bo')
sp1.plot(x.real, x.imag, 'ro')
sp1.set_xlim([-1.5, 1.5])
sp1.set_ylim([-1.5, 1.5])
fill = int(numpy.log2(len(constel)))
for i,c in enumerate(constel):
sp1.text(1.2*c.real, 1.2*c.imag, bin(code[i])[2:].zfill(fill),
ha='center', va='center', size=18)
pylab.show()
| gpl-3.0 |
dmsimard/ansible | lib/ansible/plugins/lookup/pipe.py | 18 | 2941 | # (c) 2012, Daniel Hokka Zakrisson <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r"""
name: pipe
author: Daniel Hokka Zakrisson (!UNKNOWN) <[email protected]>
version_added: "0.9"
short_description: read output from a command
description:
- Run a command and return the output.
options:
_terms:
description: command(s) to run.
required: True
notes:
- Like all lookups this runs on the Ansible controller and is unaffected by other keywords, such as become,
      so if you need different permissions you must change the command or run Ansible as another user.
- Alternatively you can use a shell/command task that runs against localhost and registers the result.
- Pipe lookup internally invokes Popen with shell=True (this is required and intentional).
      This type of invocation is considered a security issue if appropriate care is not taken to sanitize any user provided or variable input.
It is strongly recommended to pass user input or variable input via quote filter before using with pipe lookup.
See example section for this.
Read more about this L(Bandit B602 docs,https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html)
"""
EXAMPLES = r"""
- name: raw result of running date command
debug:
msg: "{{ lookup('pipe', 'date') }}"
- name: Always use quote filter to make sure your variables are safe to use with shell
debug:
msg: "{{ lookup('pipe', 'getent passwd ' + myuser | quote ) }}"
"""
RETURN = r"""
_string:
description:
- stdout from command
type: list
elements: str
"""
import subprocess
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
ret = []
for term in terms:
'''
http://docs.python.org/2/library/subprocess.html#popen-constructor
The shell argument (which defaults to False) specifies whether to use the
shell as the program to execute. If shell is True, it is recommended to pass
args as a string rather than as a sequence
https://github.com/ansible/ansible/issues/6550
'''
term = str(term)
p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.append(stdout.decode("utf-8").rstrip())
else:
raise AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
return ret
| gpl-3.0 |
appsembler/edx-platform | cms/celery.py | 1 | 2978 | """
Import celery, load its settings from the django settings
and auto discover tasks in all installed django apps.
Taken from: http://celery.readthedocs.org/en/latest/django/first-steps-with-django.html
"""
from __future__ import absolute_import
import beeline
import logging
import os
from celery import Celery
from celery.signals import worker_process_init, task_prerun, task_postrun
from django.conf import settings
from openedx.core.lib.celery.routers import AlternateEnvironmentRouter
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings')
APP = Celery('proj')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
APP.config_from_object('django.conf:settings')
APP.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
class Router(AlternateEnvironmentRouter):
"""
An implementation of AlternateEnvironmentRouter, for routing tasks to non-cms queues.
"""
@property
def alternate_env_tasks(self):
"""
Defines alternate environment tasks, as a dict of form { task_name: alternate_queue }
"""
# The tasks below will be routed to the default lms queue.
return {
'completion_aggregator.tasks.update_aggregators': 'lms',
'openedx.core.djangoapps.content.block_structure.tasks.update_course_in_cache': 'lms',
'openedx.core.djangoapps.content.block_structure.tasks.update_course_in_cache_v2': 'lms',
}
@property
def explicit_queues(self):
"""
Defines specific queues for tasks to run in (typically outside of the cms environment),
as a dict of form { task_name: queue_name }.
"""
return {
'lms.djangoapps.grades.tasks.compute_all_grades_for_course': settings.POLICY_CHANGE_GRADES_ROUTING_KEY,
}
# honeycomb setup
@worker_process_init.connect
def initialize_honeycomb(**kwargs):
if settings.HONEYCOMB_WRITEKEY and settings.HONEYCOMB_DATASET:
logging.info('beeline initialization in process pid {}'.format(os.getpid()))
beeline.init(
writekey=settings.HONEYCOMB_WRITEKEY,
dataset=settings.HONEYCOMB_DATASET,
service_name='cms-celery'
)
@task_prerun.connect
def start_celery_trace(task_id, task, args, kwargs, **rest_args):
queue_name = task.request.delivery_info.get("exchange", None)
task.request.trace = beeline.start_trace(
context={
"name": "celery",
"celery.task_id": task_id,
"celery.args": args,
"celery.kwargs": kwargs,
"celery.task_name": task.name,
"celery.queue": queue_name,
}
)
# optional: finish and send the trace at the end of each task
@task_postrun.connect
def end_celery_trace(task, state, **kwargs):
beeline.add_field("celery.status", state)
beeline.finish_trace(task.request.trace)
| agpl-3.0 |
daydayuplo/gee | earth_enterprise/src/scons/khEnvironment.py | 3 | 18276 | #
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The build environment, builders, actions and helper methods.
Define build environments, builders, actions and helper methods in this
central place and reuse it in all SConscripts as much as possible.
"""
import os
import os.path
import sys
import time
import SCons
from SCons.Environment import Environment
def AppendToFlags(target, env, key, to_add):
if not SCons.Util.is_List(to_add):
to_add = [to_add]
tmp = target.get(key, env.get(key, []))
if not SCons.Util.is_List(tmp):
tmp = [tmp]
target[key] = tmp + to_add
def PrependToFlags(target, env, key, to_add):
if not SCons.Util.is_List(to_add):
to_add = [to_add]
tmp = target.get(key, env.get(key, []))
if not SCons.Util.is_List(tmp):
tmp = [tmp]
target[key] = to_add + tmp
# Qt stuff - yanked from scons-users mailing list archive
def Emitter(env, target, source):
base = SCons.Util.splitext(str(source[0].name))[0]
uidir = os.path.join(str(target[0].get_dir()), '.ui')
hfile = os.path.join(uidir, base+'.h')
cppfile = os.path.join(uidir, base+'.cpp')
mocdir = os.path.join(str(target[0].get_dir()), '.moc')
mocfile = os.path.join(mocdir, 'moc_' + base + '.cpp')
env.uic_impl(cppfile, [hfile, source])
env.moc(mocfile, hfile)
return [hfile], [source]
uic = SCons.Builder.Builder(action='$UIC $SOURCE -o $TARGET',
emitter=Emitter)
uic_impl = SCons.Builder.Builder(action='$UIC -o $TARGET -impl $SOURCES')
moc = SCons.Builder.Builder(action='$MOC -o $TARGET $SOURCE')
# pylint: disable=W0104
def CleanupLibFlags(prefix, a_list, suffix, stripprefix, stripsuffix, env):
a_list = env['_oldstripixes'](prefix, a_list, suffix,
stripprefix, stripsuffix, env)
return a_list
def AddSourceScannerToTargets(target, source, env):
for t in target:
if t.source_scanner is None:
key = t.scanner_key()
scanner = env.get_scanner(key)
if scanner:
t.source_scanner = scanner
return (target, source)
idl_h_builder = SCons.Builder.Builder(
action='$KHIDL --hfile $TARGET $SOURCE',
suffix='.h',
src_suffix='.idl',
# emitter=AddSourceScannerToTargets,
)
idl_impl_h_builder = SCons.Builder.Builder(
action='$KHIDL --impl_hfile $TARGET $SOURCE',
suffix='_impl.h',
src_suffix='.idl',
# emitter=AddSourceScannerToTargets,
)
idl_cpp_builder = SCons.Builder.Builder(
action='$KHIDL --cppfile $TARGET $SOURCE',
suffix='.cpp',
src_suffix='.idl',
# emitter=AddSourceScannerToTargets,
)
def AliasBuilder(env, target, source):
(env, target, source) = (env, target, source) # Silence gpylint
def NoOutput(target, source, env):
(env, target, source) = (env, target, source) # Silence gpylint
return None
my_alias_builder = SCons.Builder.Builder(
action=SCons.Action.Action(AliasBuilder, NoOutput),
target_factory=SCons.Node.Alias.default_ans.Alias,
source_factory=SCons.Node.FS.Entry,
multi=1,
is_explicit=None,
name='my_alias_builder')
def WriteToFileFunc(file_name, strn):
"""Writes strn to file_name.
Args:
file_name: The file to which to write
strn: The string to write
"""
base_path = os.path.dirname(os.path.abspath(file_name))
os.system('test -d %s || mkdir -p %s' % (base_path, base_path))
f = open(file_name, 'w')
f.write(strn)
f.close()
def WriteToFileStrfunc(file_name, strn):
return 'WriteToFile(%s, %s)' % (file_name, strn)
def EmitBuildDateFunc(target, build_date):
"""Emits build date information to target file."""
fp = open(target, 'w')
fp.writelines(['// DO NOT MODIFY - auto-generated file\n',
'extern const char *const BUILD_DATE = "' +
time.strftime('%Y-%m-%d', build_date) + '";\n',
'extern const char *const BUILD_YEAR = "' +
time.strftime('%Y', build_date) + '";\n',
'extern const char *const BUILD_MONTH = "' +
time.strftime('%m', build_date) + '";\n',
'extern const char *const BUILD_DAY = "' +
time.strftime('%d', build_date) + '";\n',
])
fp.close()
def EmitBuildDateStrfunc(target, build_date):
return 'EmitBuildDate(%s, %s)' % (target, build_date)
# our derived class
class khEnvironment(Environment):
"""The derived environment class used in all of Fusion SConscripts."""
WriteToFile = SCons.Action.ActionFactory(WriteToFileFunc,
WriteToFileStrfunc)
EmitBuildDate = SCons.Action.ActionFactory(EmitBuildDateFunc,
EmitBuildDateStrfunc)
rsync_cmd = 'rsync -rltpvu %s %s'
rsync_excl_cmd = 'rsync -rltpvu --exclude %s %s %s'
def __init__(self,
exportdirs,
installdirs,
platform=SCons.Platform.Platform(),
tools=None,
toolpath=None,
options=None,
**kw):
if toolpath is None:
toolpath = []
args = (self, platform, tools, toolpath, options)
Environment.__init__(*args, **kw)
self.exportdirs = exportdirs
self.installdirs = installdirs
self['BUILDERS']['uic'] = uic
self['BUILDERS']['uic_impl'] = uic_impl
self['BUILDERS']['moc'] = moc
self['BUILDERS']['IDLH'] = idl_h_builder
self['BUILDERS']['IDLIMPLH'] = idl_impl_h_builder
self['BUILDERS']['IDLCPP'] = idl_cpp_builder
self['_oldstripixes'] = self['_stripixes']
self['_stripixes'] = CleanupLibFlags
DefineProtocolBufferBuilder(self)
def DeepCopy(self):
other = self.Clone()
other.MultiCommand = SCons.Action.ActionFactory(other.MultiCommandFunc,
other.MultiCommandStrfunc)
return other
def MultiCommandFunc(self, cmd):
"""Runs multiple commands in a single shell.
Args:
cmd: The bash commands (may be multiple lines)
Returns:
The return status of executing the command.
"""
return self.Execute('set -x && %s' % cmd.replace('\n', ' && '))
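  # Illustrative effect (added note, not in the original source): calling
  #   env.MultiCommandFunc('mkdir -p out\ncp a.txt out')
  # executes the single shell command "set -x && mkdir -p out && cp a.txt out";
  # the paths are placeholder values.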
def MultiCommandStrfunc(self, cmd):
if SCons.SConf.dryrun:
return '+ %s' % cmd.replace('\n', '\n+ ')
else:
return ''
# Defines a Phony target that doesn't depend on anything and is always
# executed.
def PhonyTargets(self, **kw):
ret_val = []
for target, actions in kw.items():
ret_val.append(self.AlwaysBuild(self.Alias(target, [], actions)))
return ret_val
# Install the file or directory as a part of install target.
# Do this only after dependency is built.
def InstallFileOrDir(self, source, destination, dependency, alias_name):
base_path = os.path.dirname(os.path.abspath(destination))
actions = ['test -d %s || mkdir -p %s' % (base_path, base_path),
self.rsync_cmd % (source, destination)]
if dependency:
self.Depends(self.Alias(alias_name), dependency)
this_dict = {alias_name: actions}
return self.PhonyTargets(**this_dict)
# TODO: looking for removal of this by
# env.Clean(depends_on, list_of_files_to_remove)
# The following is an work around as the above doesn't work for symbolic
# links due to scons bug. The suggested patch to scons is as in
# http://osdir.com/ml/programming.tools.scons.devel/2008-07/msg00100.html
def ExecuteOnClean(self, cmd):
if self.GetOption('clean'):
self.Execute(self.MultiCommand(cmd))
def UpdateCppflagsForSkia(self):
"""Update c++ flags for Skia code compilation."""
if self['release']:
self['CPPFLAGS'] += ['-DSK_RELEASE', '-DGR_RELEASE',
'-DSkDebugf="(void)"']
elif self['optimize']:
self['CPPFLAGS'] += ['-DSK_RELEASE', '-DGR_RELEASE',
'-DSkDebugf="(void)"']
else:
self['CPPFLAGS'] += ['-DSK_DEBUG', '-DGR_DEBUG']
if sys.byteorder == 'little':
self['CPPFLAGS'] += ['-DSK_R32_SHIFT=16', '-DSK_G32_SHIFT=8',
'-DSK_B32_SHIFT=0', '-DSK_A32_SHIFT=24']
else:
self['CPPFLAGS'] += ['-DSK_R32_SHIFT=8', '-DSK_G32_SHIFT=16',
'-DSK_B32_SHIFT=24', '-DSK_A32_SHIFT=0']
self['CPPFLAGS'] += [
'-DSK_SCALAR_IS_FLOAT', '-DSkUserConfig_DEFINED',
'-I' + os.path.join(self.exportdirs['root'], self['skia_rel_dir'],
'include/config'),
'-I' + os.path.join(self.exportdirs['root'], self['skia_rel_dir'],
'include/core'),
'-I' + os.path.join(self.exportdirs['root'], self['skia_rel_dir'],
'include/effects'),
'-I' + os.path.join(self.exportdirs['root'], self['skia_rel_dir'],
'include/images'),
'-I' + os.path.join(self.exportdirs['root'], self['skia_rel_dir'],
'include/lazy')
]
def staticLib(self, target, source, **kw):
# path to the target in the srcdir (not builddir)
target_src_node = self.arg2nodes(target)[0].srcnode()
base = os.path.basename(target)
target = os.path.join(self.exportdirs['lib'], base)
args = (target, source)
ret = self.StaticLibrary(*args, **kw)
self.Default(self.alias(target_src_node, ret))
return ret
def sharedLib(self, target, source, **kw):
# path to the target in the srcdir (not builddir)
target_src_node = self.arg2nodes(target)[0].srcnode()
base = os.path.basename(target)
target = os.path.join(self.exportdirs['lib'], base)
args = (target, source)
ret = self.SharedLibrary(*args, **kw)
self.Default(self.alias(target_src_node, ret))
return ret
def executable(self, target, source, **kw):
# path to the target in the srcdir (not builddir)
target_src_node = self.arg2nodes(target)[0].srcnode()
base = os.path.basename(target)
newtarget = os.path.join(self.exportdirs['bin'], base)
args = (newtarget, source)
ret = self.Program(*args, **kw)
self.Default(self.alias(target_src_node, ret))
return ret
def test(self, target, source, **kw):
# path to the target in the srcdir (not builddir)
target_src_node = self.arg2nodes(target)[0].srcnode()
base = os.path.basename(target)
newtarget = os.path.join(self.exportdirs['bin'], 'tests', base)
args = (newtarget, source)
ret = self.Program(*args, **kw)
self.Default(self.alias(target_src_node, ret))
return ret
def executableLink(self, dest, target, source, **unused_kw):
"""path to the target in the srcdir (not builddir)."""
target_src_node = self.arg2nodes(target)[0].srcnode()
targetbase = os.path.basename(target)
newtarget = os.path.join(self.exportdirs['bin'], targetbase)
sourcebase = os.path.basename(source)
newsource = os.path.join(self.exportdirs['bin'], sourcebase)
ret = self.Command(newtarget, [newsource],
['ln -sf ${SOURCE.file} $TARGET'])
self.Command(self.fs.File(targetbase, self.installdirs[dest]),
[self.fs.File(sourcebase, self.installdirs[dest])],
['ln $SOURCE $TARGET',
'chmod a+x $TARGET'])
self.Default(self.alias(target_src_node, ret))
return ret
def installedExecutableSymlink(self, dest, target, source, **unused_kw):
"""path to the target in the srcdir (not builddir)."""
targetbase = os.path.basename(target)
sourcebase = os.path.basename(source)
return self.Command(
self.fs.File(targetbase, self.installdirs[dest]),
[self.fs.File(sourcebase, self.installdirs[dest])],
['ln -sf ${SOURCE.file} $TARGET'])
def install(self, dest, target, subdir=''):
instdir = self.fs.Dir(subdir, self.installdirs[dest])
if not SCons.Util.is_List(target):
target = [target]
self.Install(instdir, target)
def installAs(self, dest, src, newname, subdir=''):
instdir = self.fs.Dir(subdir, self.installdirs[dest])
if not SCons.Util.is_List(src):
src = [src]
if not SCons.Util.is_List(newname):
newname = [newname]
self.InstallAs([self.fs.File(i, instdir) for i in newname], src)
def installDirExcluding(self, dest, target_dir, excluded_list, subdir=''):
instdir = self.fs.Dir(subdir, self.installdirs[dest])
self.installDirExcludingInternal(instdir, target_dir, excluded_list)
def installDirExcludingInternal(self, instdir, target_dir, excluded_list):
"""Get contents of target_dir and install in instdir."""
contents = os.listdir(target_dir)
target_dir += '/'
if not os.path.exists(instdir.get_abspath()):
os.makedirs(instdir.get_abspath())
for file_name in contents:
if file_name in excluded_list:
continue
target_file = target_dir + file_name
if os.path.isdir(target_file):
subdir = self.fs.Dir(file_name, instdir)
self.installDirExcludingInternal(subdir, target_file,
excluded_list)
else:
self.Install(instdir, target_file)
def copyfile(self, destdir, target, subdir=''):
instdir = self.fs.Dir(subdir, destdir)
ret = self.Install(instdir, target)
self.Default(self.alias(self.arg2nodes('all')[0].srcnode(), ret))
return ret
def qtFiles(self, uifiles, hfiles, imgfiles, prjbase):
for ui in uifiles:
self.uic(ui)
# now strip extentions from .ui & .h files
uifiles = [os.path.splitext(str(i))[0] for i in uifiles]
hfiles = [os.path.splitext(str(i))[0] for i in hfiles]
for h in hfiles:
self.moc('.moc/moc_'+h+'.cpp', h+'.h')
if imgfiles:
imgcollect = [self.Command('.ui/image_collection.cpp', imgfiles,
'$UIC -embed %s $SOURCES -o $TARGET' % (
prjbase))
]
else:
imgcollect = []
uicpps = ['.ui/' + u + '.cpp' for u in uifiles]
uimoccpps = ['.moc/moc_' + u + '.cpp' for u in uifiles]
hmoccpps = ['.moc/moc_' + h + '.cpp' for h in hfiles]
return uicpps + uimoccpps + hmoccpps + imgcollect
def idl(self, sources):
for idlfile in sources:
base = os.path.splitext(str(idlfile))[0]
self.IDLH('.idl/%s.h' % base, [idlfile, self['KHIDL']])
self.IDLIMPLH('.idl/%s_impl.h' % base, [idlfile, self['KHIDL']])
self.IDLCPP('.idl/%s.cpp' % base, [idlfile, self['KHIDL']])
def alias(self, target, source=None):
if source is None:
source = []
tlist = self.arg2nodes(target, self.ans.Alias)
if not SCons.Util.is_List(source):
source = [source]
source = filter(None, source)
# Re-call all the target builders to add the sources to each target.
result = []
for t in tlist:
bld = t.get_builder() or my_alias_builder
result.extend(bld(self, t, source))
return result
def ObjFromOtherDir(self, sources):
if not SCons.Util.is_List(sources):
sources = [sources]
root_dir = self.exportdirs['root']
shobj_suffix = self['SHOBJSUFFIX']
return [root_dir + p + shobj_suffix for p in sources if p]
def ProtocolBufferGenerator(source, target, env, for_signature):
"""Protocol buffer generator builder.
Args:
source: List of source nodes
target: List of target nodes
env: Environment in which to build
for_signature: Just generate command for build signature; don't actually
run it.
Returns:
    A single-element list holding the protocol buffer compiler command line.
"""
(env, target, source) = (env, target, source) # Silence gpylint
for_signature = for_signature # Silence gpylint
# Must run the protocol buffer compiler from the source directory!
command = ('cd ${SOURCES.dir}; '
'${TOOLS_BIN.abspath}/${PROTOBUF_COMPILER} '
'--cpp_out $PROTOBUF_OUT_ROOT ${SOURCES.file}')
return [command]
def ProtocolBufferEmitter(target, source, env):
"""Protocol buffer emitter.
Args:
target: List of target nodes
source: List of source nodes
env: Environment in which to build
Returns:
New (target, source).
"""
env = env # Silence gpylint
# regardless of where the source comes from, we want to put the output files
# (.pb.cc and .pb.h) into the PROTOBUF_OUT_ROOT directory.
out_dir = env['PROTOBUF_OUT_ROOT'] + '/'
# get the basename (non directory) of our source
sourcebase = os.path.basename(str(source[0]))
# strip off source extension and replace it with the two we want
targetcc = out_dir + os.path.splitext(sourcebase)[0] + '.pb.cc'
targeth = out_dir + os.path.splitext(sourcebase)[0] + '.pb.h'
# build a new list of targets (ignoring anything that scons already has there)
target = [targetcc, targeth]
return target, source
def DefineProtocolBufferBuilder(env):
# Note: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool.
Args:
env: Environment to modify.
"""
# All protocol buffer generated files will be placed in the export directory
# under protobuf.
# To include them, the caller need only include "protobuf/xxx.pb.h"
out_dir = os.path.join(env.exportdirs['root'], 'protobuf')
out_dir = out_dir.strip('#')
out_dir = os.path.abspath(out_dir)
env.Replace(
# Root of output; files will be placed in subdirs of this mirroring the
# source tree.
PROTOBUF_OUT_ROOT=out_dir
)
# Set tool based on local platform
env['TOOLS_BIN'] = env.fs.Dir('../tools/bin/')
env['PROTOBUF_COMPILER'] = 'protoc'
# Add protocol buffer builder
bld = SCons.Script.Builder(generator=ProtocolBufferGenerator,
emitter=ProtocolBufferEmitter,
single_source=1,
suffix='.pb.cc')
env.Append(BUILDERS={'ProtocolBuffer': bld})
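# Illustrative sketch (not part of the original tool): how a SConscript might
# use the builder registered above once DefineProtocolBufferBuilder(env) has
# run. The 'config.proto' path is a hypothetical example; each call compiles a
# single .proto (single_source=1) and the emitter places the generated
# .pb.cc/.pb.h pair under $PROTOBUF_OUT_ROOT.
def ExampleProtocolBufferUsage(env):
  DefineProtocolBufferBuilder(env)
  return env.ProtocolBuffer('config.proto')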
| apache-2.0 |
redapple/parslepy | tests/test_parslepy_parse.py | 1 | 10453 | from __future__ import unicode_literals
import parslepy
import parslepy.base
import lxml.cssselect
from nose.tools import *
from .tools import *
import pprint
import os
def test_parslepy_xpathparse_xml_file():
parselet_script = {"id": "//atom:id"}
xsh = parslepy.selectors.XPathSelectorHandler(
namespaces={'atom': 'http://www.w3.org/2005/Atom'}
)
dirname = os.path.dirname(os.path.abspath(__file__))
fp = open(os.path.join(dirname, 'data/itunes.topalbums.rss'))
expected = {
'id': 'https://itunes.apple.com/us/rss/topalbums/limit=10/explicit=true/xml'
}
parselet = parslepy.Parselet(parselet_script, selector_handler=xsh)
extracted = parselet.parse(fp, parser=lxml.etree.XMLParser())
assert_dict_equal(extracted, expected)
def test_parslepy_defaultparse_xml_file():
parselet_script = {"id": "//atom:id"}
dsh = parslepy.selectors.DefaultSelectorHandler(
namespaces={'atom': 'http://www.w3.org/2005/Atom'}
)
dirname = os.path.dirname(os.path.abspath(__file__))
fp = open(os.path.join(dirname, 'data/itunes.topalbums.rss'))
expected = {
'id': 'https://itunes.apple.com/us/rss/topalbums/limit=10/explicit=true/xml'
}
parselet = parslepy.Parselet(parselet_script, selector_handler=dsh)
extracted = parselet.parse(fp, parser=lxml.etree.XMLParser())
assert_dict_equal(extracted, expected)
def test_parslepy_defaultparse_xml_file_cssselectors():
parselet_script = {"id": "atom|id", "imid": "atom|id @im|id"}
dsh = parslepy.selectors.DefaultSelectorHandler(
namespaces={
'atom': 'http://www.w3.org/2005/Atom',
'im': 'http://itunes.apple.com/rss',
}
)
dirname = os.path.dirname(os.path.abspath(__file__))
fp = open(os.path.join(dirname, 'data/itunes.topalbums.rss'))
expected = {
'id': 'https://itunes.apple.com/us/rss/topalbums/limit=10/explicit=true/xml',
'imid': '647928068',
}
parselet = parslepy.Parselet(parselet_script, selector_handler=dsh)
extracted = parselet.parse(fp, parser=lxml.etree.XMLParser())
assert_dict_equal(extracted, expected)
xmldoc = b"""<?xml version="1.0" encoding="utf-8"?>
<feed xmlns:im="http://itunes.apple.com/rss" xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
<id>https://itunes.apple.com/us/rss/topalbums/limit=10/explicit=true/xml</id><title>iTunes Store: Top Albums</title><updated>2013-06-25T06:27:25-07:00</updated><link rel="alternate" type="text/html" href="https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewTop?cc=us&id=38&popId=11"/><link rel="self" href="https://itunes.apple.com/us/rss/topalbums/limit=10/explicit=true/xml"/><icon>http://itunes.apple.com/favicon.ico</icon><author><name>iTunes Store</name><uri>http://www.apple.com/itunes/</uri></author><rights>Copyright 2008 Apple Inc.</rights>
<entry>
<updated>2013-06-25T06:27:25-07:00</updated>
<id im:id="647928068">https://itunes.apple.com/us/album/the-gifted/id647928068?uo=2</id>
<title>The Gifted - Wale</title>
<im:name>The Gifted</im:name>
<im:image height="55">http://a815.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.55x55-70.jpg</im:image>
<im:image height="60">http://a1537.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.60x60-50.jpg</im:image>
<im:image height="170">http://a976.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.170x170-75.jpg</im:image>
</entry>
</feed>
"""
def test_parslepy_xpathparse_xml_fromstring():
parselet_script = {
"--(//atom:feed/atom:entry)": {
"title": "atom:title",
"name": "im:name",
"id": "atom:id/@im:id",
"images(im:image)": [{
"height": "@height",
"url": ".",
}],
"releasedate": "im:releaseDate",
}
}
xsh = parslepy.selectors.XPathSelectorHandler(
namespaces={
'atom': 'http://www.w3.org/2005/Atom',
'im': 'http://itunes.apple.com/rss',
}
)
expected = {
'id': '647928068',
'images': [
{ 'height': '55',
'url': 'http://a815.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.55x55-70.jpg'
},
{ 'height': '60',
'url': 'http://a1537.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.60x60-50.jpg'
},
{ 'height': '170',
'url': 'http://a976.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.170x170-75.jpg'
}
],
'name': 'The Gifted',
'title': 'The Gifted - Wale',
}
parselet = parslepy.Parselet(parselet_script, selector_handler=xsh)
extracted = parselet.parse_fromstring(xmldoc, parser=lxml.etree.XMLParser())
assert_dict_equal(extracted, expected)
def test_parslepy_defaultparse_xml_fromstring():
parselet_script = {
"--(//atom:feed/atom:entry)": {
"title": "atom:title",
"name": "im:name",
"id": "atom:id/@im:id",
"images(im:image)": [{
"height": "@height",
"url": ".",
}],
"releasedate": "im:releaseDate",
}
}
dsh = parslepy.selectors.DefaultSelectorHandler(
namespaces={
'atom': 'http://www.w3.org/2005/Atom',
'im': 'http://itunes.apple.com/rss',
}
)
expected = {
'id': '647928068',
'images': [
{ 'height': '55',
'url': 'http://a815.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.55x55-70.jpg'
},
{ 'height': '60',
'url': 'http://a1537.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.60x60-50.jpg'
},
{ 'height': '170',
'url': 'http://a976.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.170x170-75.jpg'
}
],
'name': 'The Gifted',
'title': 'The Gifted - Wale',
}
parselet = parslepy.Parselet(parselet_script, selector_handler=dsh)
extracted = parselet.parse_fromstring(xmldoc, parser=lxml.etree.XMLParser())
assert_dict_equal(extracted, expected)
def test_parslepy_defaultparse_xml_fromstring_cssselectors():
parselet_script = {
"--(atom|feed atom|entry)": {
"title": "atom|title",
"name": "im|name",
"id": "atom|id @im|id",
"images(im|image)": [{
"height": "@height",
"url": ".",
}],
"releasedate": "im|releaseDate",
}
}
dsh = parslepy.selectors.DefaultSelectorHandler(
namespaces={
'atom': 'http://www.w3.org/2005/Atom',
'im': 'http://itunes.apple.com/rss',
}
)
expected = {
'id': '647928068',
'images': [
{ 'height': '55',
'url': 'http://a815.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.55x55-70.jpg'
},
{ 'height': '60',
'url': 'http://a1537.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.60x60-50.jpg'
},
{ 'height': '170',
'url': 'http://a976.phobos.apple.com/us/r30/Features/v4/02/cc/73/02cc7370-693c-f0fe-505b-bb84043ce186/dj.pehmruyt.170x170-75.jpg'
}
],
'name': 'The Gifted',
'title': 'The Gifted - Wale',
}
parselet = parslepy.Parselet(parselet_script, selector_handler=dsh)
extracted = parselet.parse_fromstring(xmldoc, parser=lxml.etree.XMLParser())
assert_dict_equal(extracted, expected)
def test_parslepy_parse_html_file():
parselet = parslepy.Parselet({"title": "h1"})
expected = {'title': 'Markup Validation Service'}
dirname = os.path.dirname(os.path.abspath(__file__))
extracted = parselet.parse(
open(os.path.join(dirname, 'data/validator.w3.org.html'))
)
assert_dict_equal(extracted, expected)
def test_parslepy_parse_html_fromstring():
htmldoc = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<title>The W3C Markup Validation Service</title>
<link rev="made" href="mailto:[email protected]" />
<link rel="shortcut icon" href="http://www.w3.org/2008/site/images/favicon.ico" type="image/x-icon" />
<link rev="start" href="./" title="Home Page" />
<style type="text/css" media="all">
@import "./style/base";
</style>
<script type="text/javascript" src="scripts/combined"></script>
<meta name="keywords" content="HTML, HyperText Markup Language, Validation,
W3C Markup Validation Service" />
<meta name="description" content="W3C's easy-to-use
markup validation service, based on SGML and XML parsers." />
<link rel="alternate" type="application/atom+xml" href="http://www.w3.org/QA/Tools/validator-whatsnew.atom" />
</head>
<body>
<div id="banner">
<h1 id="title">
<a href="http://www.w3.org/"><img alt="W3C" width="110" height="61" id="logo" src="./images/w3c.png" /></a>
<a href="./"><span>Markup Validation Service</span></a>
</h1>
<p id="tagline">Check the markup (HTML, XHTML, ...) of Web documents</p>
</div>
</body>
</html>
"""
parselet = parslepy.Parselet(
{
"title": "h1",
"pid": "p[id] @id"
})
expected = {
'title': 'Markup Validation Service',
'pid': 'tagline'
}
extracted = parselet.parse_fromstring(htmldoc)
assert_dict_equal(extracted, expected)
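# Illustrative sketch (not part of the original test suite): the minimal
# parslepy pattern the tests above exercise -- a parselet maps output keys to
# CSS/XPath selectors and parse_fromstring() returns a plain dict. The tiny
# HTML snippet below is an assumption made up for demonstration.
def _example_minimal_parselet():
    doc = "<html><body><h1>Title</h1><p id='tagline'>hello</p></body></html>"
    rules = {"title": "h1", "pid": "p[id] @id"}
    # expected result: {'title': 'Title', 'pid': 'tagline'}
    return parslepy.Parselet(rules).parse_fromstring(doc)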
| mit |
c-PRIMED/puq | test/CustomParameter_test.py | 1 | 7647 | #! /usr/bin/env python
'''
Testsuite for the CustomParameter class
'''
from __future__ import absolute_import, division, print_function
import numpy as np
from puq import *
def _hisplot(y, nbins):
n, bins = np.histogram(y, nbins, normed=True)
mids = bins[:-1] + np.diff(bins) / 2.0
return mids, n
def compare_curves(x1, y1, x2, y2, **args):
ay = np.interp(x2, x1, y1)
print("maximum difference is", np.max(np.abs(ay - y2)))
assert np.allclose(ay, y2, **args)
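# Illustrative sketch (not part of the original test suite): compare_curves()
# interpolates the first curve onto the second curve's x grid before the
# allclose assertion, so two samplings of the same underlying line agree. The
# values below (y = 2x on two different grids) are made up for demonstration.
def _compare_curves_example():
    compare_curves([0.0, 1.0, 2.0], [0.0, 2.0, 4.0], [0.5, 1.5], [1.0, 3.0])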
n = NormalParameter('x','x',mean=10,dev=1)
norm80 = n.pdf.lhs(80)
# test mean and deviation
def test_custom_pdf_meandev():
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(norm80))
assert np.allclose(c.pdf.mean, 10.0, rtol=.05), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 1.0, rtol=.05), "dev=%s" % c.pdf.dev
# test lhs()
def test_custom_pdf_lhs():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
print("LHS: mean=%s dev=%s" % (c.pdf.mean, c.pdf.dev))
assert(np.allclose(c.pdf.mean, 5.04, atol=.1))
assert(np.allclose(c.pdf.dev, 1.9, atol=.1))
# test the lhs() function to see if the curve it generates is
# close enough
data = c.pdf.lhs(1000)
dx, dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.01)
# test lhs1()
def test_custom_pdf_lhs1():
a = np.array([12,12,13,13,13,14,14,14,14,15,15,15,15,15,16,16,16,16,16,17,17,17,18,18])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
# test the lhs1() function to see if the curve it generates is
# close enough
xs = c.pdf.ds1(1000)
assert len(xs) == 1000
# scale [-1,1] back to original size
min, max = c.pdf.range
mean = (min + max)/2.0
xs *= max - mean
xs += mean
# bin it
mids, n = _hisplot(xs, 40)
compare_curves(c.pdf.x, c.pdf.y, mids, n, atol=.004)
'''
import matplotlib.pyplot as plt
plt.plot(mids, n, color='green')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_random():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
data = c.pdf.random(100000)
dx,dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.03)
'''
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
# test lhs()
def test_custom_pdf_lhs_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
print("LHS: mean=%s dev=%s" % (c.pdf.mean, c.pdf.dev))
assert(np.allclose(c.pdf.mean, 5.04, atol=.1))
assert(np.allclose(c.pdf.dev, 1.7, atol=.1))
# test the lhs() function to see if the curve it generates is
# close enough
data = c.pdf.ds(1000)
dx,dy = _hisplot(data, 40)
"""
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
"""
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.4)
# test lhs1()
def test_custom_pdf_lhs1_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
# test the lhs1() function to see if the curve it generates is
# close enough
xs = c.pdf.ds1(1000)
assert len(xs) == 1000
# scale [-1,1] back to original size
min, max = c.pdf.range
mean = (min + max)/2.0
xs *= max - mean
xs += mean
# bin it
mids, n = _hisplot(xs, 40)
compare_curves(c.pdf.x, c.pdf.y, mids, n, atol=.4)
'''
import matplotlib.pyplot as plt
plt.plot(mids, n, color='green')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_random_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
data = c.pdf.random(100000)
dx,dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.4)
'''
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_small():
a = np.array([2,3,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert np.allclose(c.pdf.mean, 7.0/3, atol=.3), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 0.4, atol=.2), "dev=%s" % c.pdf.dev
def test_custom_pdf_small_fit():
a = np.array([2,3,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert np.allclose(c.pdf.mean, 7.0/3, atol=.3), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 0.4, atol=.4), "dev=%s" % c.pdf.dev
# single data point. Must use Bayesian fit.
def test_custom_pdf_single_fit():
a = np.array([42])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, error=NormalPDF(0,.1)))
assert np.allclose(c.pdf.mean, 42), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, .1, atol=.01), "dev=%s" % c.pdf.dev
def test_custom_pdf_single():
a = np.array([42])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 42
assert c.pdf.dev == 0
assert c.pdf.mode == 42
def test_custom_pdf_zero():
a = np.array([0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozero():
a = np.array([0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozerozero():
a = np.array([0, 0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozerozero_fit():
a = np.array([0, 0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_const():
a = np.array([2,2,2,2,2,2,2,2,2,2,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 2
assert c.pdf.dev == 0
assert c.pdf.mode == 2
def test_custom_pdf_const_fit():
a = np.array([2,2,2,2,2,2,2,2,2,2,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert c.pdf.mean == 2
assert c.pdf.dev == 0
assert c.pdf.mode == 2
#### EXCEPTION TESTING
# forget to include pdf
def test_custom_pdf_exception():
ok = False
try:
c = CustomParameter('x', 'X, the unknown')
except ValueError:
ok = True
except:
assert False, 'Wrong Exception'
if not ok:
assert False, 'No Exception when one was expected'
if __name__ == "__main__":
test_custom_pdf_meandev()
test_custom_pdf_lhs()
test_custom_pdf_lhs1()
test_custom_pdf_random()
test_custom_pdf_lhs_nofit()
test_custom_pdf_lhs1_nofit()
test_custom_pdf_random_nofit()
test_custom_pdf_exception()
test_custom_pdf_small()
test_custom_pdf_small_fit()
test_custom_pdf_single()
test_custom_pdf_single_fit()
test_custom_pdf_const()
test_custom_pdf_const_fit()
test_custom_pdf_zero()
test_custom_pdf_zerozero()
test_custom_pdf_zerozerozero()
test_custom_pdf_zerozerozero_fit()
| mit |
Lkhagvadelger/phantomjs | src/breakpad/src/tools/gyp/test/lib/TestGyp.py | 137 | 23211 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
TestGyp.py: a testing framework for GYP integration tests.
"""
import os
import re
import shutil
import stat
import sys
import TestCommon
from TestCommon import __all__
__all__.extend([
'TestGyp',
])
class TestGypBase(TestCommon.TestCommon):
"""
Class for controlling end-to-end tests of gyp generators.
Instantiating this class will create a temporary directory and
arrange for its destruction (via the TestCmd superclass) and
copy all of the non-gyptest files in the directory hierarchy of the
executing script.
The default behavior is to test the 'gyp' or 'gyp.bat' file in the
current directory. An alternative may be specified explicitly on
instantiation, or by setting the TESTGYP_GYP environment variable.
This class should be subclassed for each supported gyp generator
(format). Various abstract methods below define calling signatures
used by the test scripts to invoke builds on the generated build
configuration and to run executables generated by those builds.
"""
build_tool = None
build_tool_list = []
_exe = TestCommon.exe_suffix
_obj = TestCommon.obj_suffix
shobj_ = TestCommon.shobj_prefix
_shobj = TestCommon.shobj_suffix
lib_ = TestCommon.lib_prefix
_lib = TestCommon.lib_suffix
dll_ = TestCommon.dll_prefix
_dll = TestCommon.dll_suffix
# Constants to represent different targets.
ALL = '__all__'
DEFAULT = '__default__'
# Constants for different target types.
EXECUTABLE = '__executable__'
STATIC_LIB = '__static_lib__'
SHARED_LIB = '__shared_lib__'
def __init__(self, gyp=None, *args, **kw):
self.origin_cwd = os.path.abspath(os.path.dirname(sys.argv[0]))
if not gyp:
gyp = os.environ.get('TESTGYP_GYP')
if not gyp:
if sys.platform == 'win32':
gyp = 'gyp.bat'
else:
gyp = 'gyp'
self.gyp = os.path.abspath(gyp)
self.initialize_build_tool()
if not kw.has_key('match'):
kw['match'] = TestCommon.match_exact
if not kw.has_key('workdir'):
# Default behavior: the null string causes TestCmd to create
# a temporary directory for us.
kw['workdir'] = ''
formats = kw.get('formats', [])
if kw.has_key('formats'):
del kw['formats']
super(TestGypBase, self).__init__(*args, **kw)
excluded_formats = set([f for f in formats if f[0] == '!'])
included_formats = set(formats) - excluded_formats
if ('!'+self.format in excluded_formats or
included_formats and self.format not in included_formats):
msg = 'Invalid test for %r format; skipping test.\n'
self.skip_test(msg % self.format)
self.copy_test_configuration(self.origin_cwd, self.workdir)
self.set_configuration(None)
def built_file_must_exist(self, name, type=None, **kw):
"""
Fails the test if the specified built file name does not exist.
"""
return self.must_exist(self.built_file_path(name, type, **kw))
def built_file_must_not_exist(self, name, type=None, **kw):
"""
Fails the test if the specified built file name exists.
"""
return self.must_not_exist(self.built_file_path(name, type, **kw))
def built_file_must_match(self, name, contents, **kw):
"""
Fails the test if the contents of the specified built file name
do not match the specified contents.
"""
return self.must_match(self.built_file_path(name, **kw), contents)
def built_file_must_not_match(self, name, contents, **kw):
"""
Fails the test if the contents of the specified built file name
match the specified contents.
"""
return self.must_not_match(self.built_file_path(name, **kw), contents)
def copy_test_configuration(self, source_dir, dest_dir):
"""
Copies the test configuration from the specified source_dir
(the directory in which the test script lives) to the
specified dest_dir (a temporary working directory).
This ignores all files and directories that begin with
the string 'gyptest', and all '.svn' subdirectories.
"""
for root, dirs, files in os.walk(source_dir):
if '.svn' in dirs:
dirs.remove('.svn')
dirs = [ d for d in dirs if not d.startswith('gyptest') ]
files = [ f for f in files if not f.startswith('gyptest') ]
for dirname in dirs:
source = os.path.join(root, dirname)
destination = source.replace(source_dir, dest_dir)
os.mkdir(destination)
if sys.platform != 'win32':
shutil.copystat(source, destination)
for filename in files:
source = os.path.join(root, filename)
destination = source.replace(source_dir, dest_dir)
shutil.copy2(source, destination)
def initialize_build_tool(self):
"""
Initializes the .build_tool attribute.
Searches the .build_tool_list for an executable name on the user's
$PATH. The first tool on the list is used as-is if nothing is found
on the current $PATH.
"""
for build_tool in self.build_tool_list:
if not build_tool:
continue
if os.path.isabs(build_tool):
self.build_tool = build_tool
return
build_tool = self.where_is(build_tool)
if build_tool:
self.build_tool = build_tool
return
if self.build_tool_list:
self.build_tool = self.build_tool_list[0]
def relocate(self, source, destination):
"""
Renames (relocates) the specified source (usually a directory)
to the specified destination, creating the destination directory
first if necessary.
Note: Don't use this as a generic "rename" operation. In the
future, "relocating" parts of a GYP tree may affect the state of
the test to modify the behavior of later method calls.
"""
destination_dir = os.path.dirname(destination)
if not os.path.exists(destination_dir):
self.subdir(destination_dir)
os.rename(source, destination)
def report_not_up_to_date(self):
"""
Reports that a build is not up-to-date.
This provides common reporting for formats that have complicated
conditions for checking whether a build is up-to-date. Formats
that expect exact output from the command (make, scons) can
just set stdout= when they call the run_build() method.
"""
print "Build is not up-to-date:"
print self.banner('STDOUT ')
print self.stdout()
stderr = self.stderr()
if stderr:
print self.banner('STDERR ')
print stderr
def run_gyp(self, gyp_file, *args, **kw):
"""
Runs gyp against the specified gyp_file with the specified args.
"""
# TODO: --depth=. works around Chromium-specific tree climbing.
args = ('--depth=.', '--format='+self.format, gyp_file) + args
return self.run(program=self.gyp, arguments=args, **kw)
def run(self, *args, **kw):
"""
Executes a program by calling the superclass .run() method.
This exists to provide a common place to filter out keyword
arguments implemented in this layer, without having to update
the tool-specific subclasses or clutter the tests themselves
with platform-specific code.
"""
if kw.has_key('SYMROOT'):
del kw['SYMROOT']
super(TestGypBase, self).run(*args, **kw)
def set_configuration(self, configuration):
"""
Sets the configuration, to be used for invoking the build
tool and testing potential built output.
"""
self.configuration = configuration
def configuration_dirname(self):
if self.configuration:
return self.configuration.split('|')[0]
else:
return 'Default'
def configuration_buildname(self):
if self.configuration:
return self.configuration
else:
return 'Default'
#
# Abstract methods to be defined by format-specific subclasses.
#
def build(self, gyp_file, target=None, **kw):
"""
Runs a build of the specified target against the configuration
generated from the specified gyp_file.
A 'target' argument of None or the special value TestGyp.DEFAULT
specifies the default argument for the underlying build tool.
A 'target' argument of TestGyp.ALL specifies the 'all' target
(if any) of the underlying build tool.
"""
raise NotImplementedError
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type.
"""
raise NotImplementedError
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable program built from a gyp-generated configuration.
The specified name should be independent of any particular generator.
Subclasses should find the output executable in the appropriate
output build directory, tack on any necessary executable suffix, etc.
"""
raise NotImplementedError
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified target is up to date.
The subclass should implement this by calling build()
(or a reasonable equivalent), checking whatever conditions
will tell it the build was an "up to date" null build, and
failing if it isn't.
"""
raise NotImplementedError
class TestGypGypd(TestGypBase):
"""
Subclass for testing the GYP 'gypd' generator (spit out the
internal data structure as pretty-printed Python).
"""
format = 'gypd'
class TestGypMake(TestGypBase):
"""
Subclass for testing the GYP Make generator.
"""
format = 'make'
build_tool_list = ['make']
ALL = 'all'
def build(self, gyp_file, target=None, **kw):
"""
Runs a Make build using the Makefiles generated from the specified
gyp_file.
"""
arguments = kw.get('arguments', [])
if self.configuration:
arguments.append('BUILDTYPE=' + self.configuration)
if target not in (None, self.DEFAULT):
arguments.append(target)
kw['arguments'] = arguments
return self.run(program=self.build_tool, **kw)
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified Make target is up to date.
"""
if target in (None, self.DEFAULT):
message_target = 'all'
else:
message_target = target
kw['stdout'] = "make: Nothing to be done for `%s'.\n" % message_target
return self.build(gyp_file, target, **kw)
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable built by Make.
"""
configuration = self.configuration_dirname()
libdir = os.path.join('out', configuration, 'lib')
# TODO(piman): when everything is cross-compile safe, remove lib.target
os.environ['LD_LIBRARY_PATH'] = libdir + '.host:' + libdir + '.target'
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type,
as built by Make.
Built files are in the subdirectory 'out/{configuration}'.
The default is 'out/Default'.
A chdir= keyword argument specifies the source directory
relative to which the output subdirectory can be found.
"type" values of STATIC_LIB or SHARED_LIB append the necessary
prefixes and suffixes to a platform-independent library base name.
A libdir= keyword argument specifies a library subdirectory other
than the default 'obj.target'.
"""
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
configuration = self.configuration_dirname()
result.extend(['out', configuration])
if type == self.EXECUTABLE:
result.append(name + self._exe)
elif type == self.STATIC_LIB:
name = self.lib_ + name + self._lib
libdir = kw.get('libdir', 'lib')
result.extend([libdir, name])
elif type == self.SHARED_LIB:
name = self.dll_ + name + self._dll
libdir = kw.get('libdir', 'lib.target')
result.extend([libdir, name])
else:
result.append(name)
return self.workpath(*result)
class TestGypMSVS(TestGypBase):
"""
Subclass for testing the GYP Visual Studio generator.
"""
format = 'msvs'
u = r'=== Build: 0 succeeded, 0 failed, (\d+) up-to-date, 0 skipped ==='
up_to_date_re = re.compile(u, re.M)
# Initial None element will indicate to our .initialize_build_tool()
# method below that 'devenv' was not found on %PATH%.
#
# Note: we must use devenv.com to be able to capture build output.
# Directly executing devenv.exe only sends output to BuildLog.htm.
build_tool_list = [None, 'devenv.com']
def initialize_build_tool(self):
"""
Initializes the Visual Studio .build_tool parameter, searching %PATH%
and %PATHEXT% for a devenv.{exe,bat,...} executable, and falling
back to a hard-coded default (on the current drive) if necessary.
"""
super(TestGypMSVS, self).initialize_build_tool()
if not self.build_tool:
# We didn't find 'devenv' on the path. Just hard-code a default,
# and revisit this if it becomes important.
possible = [
('C:\\Program Files',
'Microsoft Visual Studio 8', 'Common7', 'IDE', 'devenv.com'),
# Note: if you're using this, set GYP_MSVS_VERSION=2008
# to get the tests to pass.
('C:\\Program Files (x86)',
'Microsoft Visual Studio 9.0', 'Common7', 'IDE', 'devenv.com'),
]
for build_tool in possible:
bt = os.path.join(*build_tool)
if os.path.exists(bt):
self.build_tool = bt
break
def build(self, gyp_file, target=None, rebuild=False, **kw):
"""
Runs a Visual Studio build using the configuration generated
from the specified gyp_file.
"""
configuration = self.configuration_buildname()
if rebuild:
build = '/Rebuild'
else:
build = '/Build'
arguments = kw.get('arguments', [])
arguments.extend([gyp_file.replace('.gyp', '.sln'),
build, configuration])
# Note: the Visual Studio generator doesn't add an explicit 'all'
# target, so we just treat it the same as the default.
if target not in (None, self.ALL, self.DEFAULT):
arguments.extend(['/Project', target])
if self.configuration:
arguments.extend(['/ProjectConfig', self.configuration])
kw['arguments'] = arguments
return self.run(program=self.build_tool, **kw)
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified Visual Studio target is up to date.
"""
result = self.build(gyp_file, target, **kw)
if not result:
stdout = self.stdout()
m = self.up_to_date_re.search(stdout)
if not m or m.group(1) == '0':
self.report_not_up_to_date()
self.fail_test()
return result
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable built by Visual Studio.
"""
configuration = self.configuration_dirname()
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type,
as built by Visual Studio.
Built files are in a subdirectory that matches the configuration
name. The default is 'Default'.
A chdir= keyword argument specifies the source directory
relative to which the output subdirectory can be found.
"type" values of STATIC_LIB or SHARED_LIB append the necessary
prefixes and suffixes to a platform-independent library base name.
"""
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
result.append(self.configuration_dirname())
if type == self.EXECUTABLE:
result.append(name + self._exe)
elif type == self.STATIC_LIB:
name = self.lib_ + name + self._lib
result.extend(['lib', name])
elif type == self.SHARED_LIB:
name = self.dll_ + name + self._dll
result.append(name)
else:
result.append(name)
return self.workpath(*result)
class TestGypSCons(TestGypBase):
"""
Subclass for testing the GYP SCons generator.
"""
format = 'scons'
build_tool_list = ['scons', 'scons.py']
ALL = 'all'
def build(self, gyp_file, target=None, **kw):
"""
Runs a scons build using the SCons configuration generated from the
specified gyp_file.
"""
arguments = kw.get('arguments', [])
dirname = os.path.dirname(gyp_file)
if dirname:
arguments.extend(['-C', dirname])
if self.configuration:
arguments.append('--mode=' + self.configuration)
if target not in (None, self.DEFAULT):
arguments.append(target)
kw['arguments'] = arguments
return self.run(program=self.build_tool, **kw)
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified SCons target is up to date.
"""
if target in (None, self.DEFAULT):
up_to_date_targets = 'all'
else:
up_to_date_targets = target
up_to_date_lines = []
for arg in up_to_date_targets.split():
up_to_date_lines.append("scons: `%s' is up to date.\n" % arg)
kw['stdout'] = ''.join(up_to_date_lines)
arguments = kw.get('arguments', [])
arguments.append('-Q')
kw['arguments'] = arguments
return self.build(gyp_file, target, **kw)
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable built by scons.
"""
configuration = self.configuration_dirname()
os.environ['LD_LIBRARY_PATH'] = os.path.join(configuration, 'lib')
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type,
as built by Scons.
Built files are in a subdirectory that matches the configuration
name. The default is 'Default'.
A chdir= keyword argument specifies the source directory
relative to which the output subdirectory can be found.
"type" values of STATIC_LIB or SHARED_LIB append the necessary
prefixes and suffixes to a platform-independent library base name.
"""
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
result.append(self.configuration_dirname())
if type == self.EXECUTABLE:
result.append(name + self._exe)
elif type == self.STATIC_LIB:
name = self.lib_ + name + self._lib
result.extend(['lib', name])
elif type == self.SHARED_LIB:
name = self.dll_ + name + self._dll
result.extend(['lib', name])
else:
result.append(name)
return self.workpath(*result)
class TestGypXcode(TestGypBase):
"""
Subclass for testing the GYP Xcode generator.
"""
format = 'xcode'
build_tool_list = ['xcodebuild']
phase_script_execution = ("\n"
"PhaseScriptExecution /\\S+/Script-[0-9A-F]+\\.sh\n"
" cd /\\S+\n"
" /bin/sh -c /\\S+/Script-[0-9A-F]+\\.sh\n"
"(make: Nothing to be done for `all'\\.\n)?")
strip_up_to_date_expressions = [
# Various actions or rules can run even when the overall build target
# is up to date. Strip those phases' GYP-generated output.
re.compile(phase_script_execution, re.S),
# The message from distcc_pump can trail the "BUILD SUCCEEDED"
# message, so strip that, too.
re.compile('__________Shutting down distcc-pump include server\n', re.S),
]
up_to_date_ending = 'Checking Dependencies...\n** BUILD SUCCEEDED **\n'
def build(self, gyp_file, target=None, **kw):
"""
Runs an xcodebuild using the .xcodeproj generated from the specified
gyp_file.
"""
arguments = kw.get('arguments', [])
arguments.extend(['-project', gyp_file.replace('.gyp', '.xcodeproj')])
if target == self.ALL:
arguments.append('-alltargets',)
elif target not in (None, self.DEFAULT):
arguments.extend(['-target', target])
if self.configuration:
arguments.extend(['-configuration', self.configuration])
symroot = kw.get('SYMROOT', '$SRCROOT/build')
if symroot:
arguments.append('SYMROOT='+symroot)
kw['arguments'] = arguments
return self.run(program=self.build_tool, **kw)
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified Xcode target is up to date.
"""
result = self.build(gyp_file, target, **kw)
if not result:
output = self.stdout()
for expression in self.strip_up_to_date_expressions:
output = expression.sub('', output)
if not output.endswith(self.up_to_date_ending):
self.report_not_up_to_date()
self.fail_test()
return result
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable built by xcodebuild.
"""
configuration = self.configuration_dirname()
os.environ['DYLD_LIBRARY_PATH'] = os.path.join('build', configuration)
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type,
as built by Xcode.
Built files are in the subdirectory 'build/{configuration}'.
The default is 'build/Default'.
A chdir= keyword argument specifies the source directory
relative to which the output subdirectory can be found.
"type" values of STATIC_LIB or SHARED_LIB append the necessary
prefixes and suffixes to a platform-independent library base name.
"""
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
configuration = self.configuration_dirname()
result.extend(['build', configuration])
if type == self.EXECUTABLE:
result.append(name + self._exe)
elif type == self.STATIC_LIB:
name = self.lib_ + name + self._lib
result.append(name)
elif type == self.SHARED_LIB:
name = name + self._dll
result.append(name)
else:
result.append(name)
return self.workpath(*result)
format_class_list = [
TestGypGypd,
TestGypMake,
TestGypMSVS,
TestGypSCons,
TestGypXcode,
]
def TestGyp(*args, **kw):
"""
Returns an appropriate TestGyp* instance for a specified GYP format.
"""
format = kw.get('format')
if format:
del kw['format']
else:
format = os.environ.get('TESTGYP_FORMAT')
for format_class in format_class_list:
if format == format_class.format:
return format_class(*args, **kw)
raise Exception, "unknown format %r" % format
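# Illustrative sketch (not part of the original module): the calling pattern a
# typical gyptest-*.py script uses with the factory above. The 'hello.gyp'
# file and 'hello' target are hypothetical names used only for demonstration,
# and pass_test() is assumed to be inherited from the TestCmd/TestCommon base.
def _example_gyptest_script():
  test = TestGyp(formats=['make'])
  test.run_gyp('hello.gyp')
  test.build('hello.gyp', test.ALL)
  test.run_built_executable('hello', stdout="Hello, world!\n")
  test.up_to_date('hello.gyp', test.DEFAULT)
  test.pass_test()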
| bsd-3-clause |
aurofable/medhack-server | venv/lib/python2.7/encodings/cp500.py | 593 | 13377 | """ Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp500',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
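# Illustrative sketch (not part of the generated codec): the Codec class above
# can be exercised directly without registering the codec. The EBCDIC byte
# values below are taken from the decoding table (0xC8 0x85 0x93 0x93 0x96
# decode to u'Hello') and are used here purely for demonstration.
def _example_roundtrip():
    text, _ = Codec().decode('\xc8\x85\x93\x93\x96')    # -> u'Hello'
    data, _ = Codec().encode(text)                      # -> original bytes
    return text, data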
| mit |
grilo/ansible-1 | lib/ansible/parsing/utils/addresses.py | 241 | 8167 | # Copyright 2015 Abhijit Menon-Sen <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.errors import AnsibleParserError, AnsibleError
# Components that match a numeric or alphanumeric begin:end or begin:end:step
# range expression inside square brackets.
numeric_range = r'''
\[
(?:[0-9]+:[0-9]+) # numeric begin:end
(?::[0-9]+)? # numeric :step (optional)
\]
'''
hexadecimal_range = r'''
\[
(?:[0-9a-f]+:[0-9a-f]+) # hexadecimal begin:end
(?::[0-9]+)? # numeric :step (optional)
\]
'''
alphanumeric_range = r'''
\[
(?:
[a-z]:[a-z]| # one-char alphabetic range
[0-9]+:[0-9]+ # ...or a numeric one
)
(?::[0-9]+)? # numeric :step (optional)
\]
'''
# Components that match a 16-bit portion of an IPv6 address in hexadecimal
# notation (0..ffff) or an 8-bit portion of an IPv4 address in decimal notation
# (0..255) or an [x:y(:z)] numeric range.
ipv6_component = r'''
(?:
[0-9a-f]{{1,4}}| # 0..ffff
{range} # or a numeric range
)
'''.format(range=hexadecimal_range)
ipv4_component = r'''
(?:
[01]?[0-9]{{1,2}}| # 0..199
2[0-4][0-9]| # 200..249
25[0-5]| # 250..255
{range} # or a numeric range
)
'''.format(range=numeric_range)
# A hostname label, e.g. 'foo' in 'foo.example.com'. Consists of alphanumeric
# characters plus dashes (and underscores) or valid ranges. The label may not
# start or end with a hyphen or an underscore. This is interpolated into the
# hostname pattern below. We don't try to enforce the 63-char length limit.
label = r'''
(?:[\w]|{range}) # Starts with an alphanumeric or a range
(?:[\w_-]|{range})* # Then zero or more of the same or [_-]
(?<![_-]) # ...as long as it didn't end with [_-]
'''.format(range=alphanumeric_range)
patterns = {
# This matches a square-bracketed expression with a port specification. What
# is inside the square brackets is validated later.
'bracketed_hostport': re.compile(
r'''^
\[(.+)\] # [host identifier]
:([0-9]+) # :port number
$
''', re.X
),
# This matches a bare IPv4 address or hostname (or host pattern including
# [x:y(:z)] ranges) with a port specification.
'hostport': re.compile(
r'''^
((?: # We want to match:
[^:\[\]] # (a non-range character
| # ...or...
\[[^\]]*\] # a complete bracketed expression)
)*) # repeated as many times as possible
:([0-9]+) # followed by a port number
$
''', re.X
),
# This matches an IPv4 address, but also permits range expressions.
'ipv4': re.compile(
r'''^
(?:{i4}\.){{3}}{i4} # Three parts followed by dots plus one
$
'''.format(i4=ipv4_component), re.X | re.I
),
# This matches an IPv6 address, but also permits range expressions.
#
# This expression looks complex, but it really only spells out the various
# combinations in which the basic unit of an IPv6 address (0..ffff) can be
# written, from :: to 1:2:3:4:5:6:7:8, plus the IPv4-in-IPv6 variants such
# as ::ffff:192.0.2.3.
#
# Note that we can't just use ipaddress.ip_address() because we also have to
# accept ranges in place of each component.
'ipv6': re.compile(
r'''^
(?:{0}:){{7}}{0}| # uncompressed: 1:2:3:4:5:6:7:8
(?:{0}:){{1,6}}:| # compressed variants, which are all
(?:{0}:)(?::{0}){{1,6}}| # a::b for various lengths of a,b
(?:{0}:){{2}}(?::{0}){{1,5}}|
(?:{0}:){{3}}(?::{0}){{1,4}}|
(?:{0}:){{4}}(?::{0}){{1,3}}|
(?:{0}:){{5}}(?::{0}){{1,2}}|
(?:{0}:){{6}}(?::{0})| # ...all with 2 <= a+b <= 7
:(?::{0}){{1,6}}| # ::ffff(:ffff...)
{0}?::| # ffff::, ::
# ipv4-in-ipv6 variants
(?:0:){{6}}(?:{0}\.){{3}}{0}|
::(?:ffff:)?(?:{0}\.){{3}}{0}|
(?:0:){{5}}ffff:(?:{0}\.){{3}}{0}
$
'''.format(ipv6_component), re.X | re.I
),
# This matches a hostname or host pattern including [x:y(:z)] ranges.
#
# We roughly follow DNS rules here, but also allow ranges (and underscores).
# In the past, no systematic rules were enforced about inventory hostnames,
# but the parsing context (e.g. shlex.split(), fnmatch.fnmatch()) excluded
# various metacharacters anyway.
#
# We don't enforce DNS length restrictions here (63 characters per label,
# 253 characters total) or make any attempt to process IDNs.
'hostname': re.compile(
r'''^
{label} # We must have at least one label
(?:\.{label})* # Followed by zero or more .labels
$
'''.format(label=label), re.X | re.I | re.UNICODE
),
}
def parse_address(address, allow_ranges=False):
"""
Takes a string and returns a (host, port) tuple. If the host is None, then
the string could not be parsed as a host identifier with an optional port
specification. If the port is None, then no port was specified.
The host identifier may be a hostname (qualified or not), an IPv4 address,
or an IPv6 address. If allow_ranges is True, then any of those may contain
[x:y] range specifications, e.g. foo[1:3] or foo[0:5]-bar[x-z].
The port number is an optional :NN suffix on an IPv4 address or host name,
or a mandatory :NN suffix on any square-bracketed expression: IPv6 address,
IPv4 address, or host name. (This means the only way to specify a port for
an IPv6 address is to enclose it in square brackets.)
"""
# First, we extract the port number if one is specified.
port = None
for matching in ['bracketed_hostport', 'hostport']:
m = patterns[matching].match(address)
if m:
(address, port) = m.groups()
port = int(port)
continue
# What we're left with now must be an IPv4 or IPv6 address, possibly with
# numeric ranges, or a hostname with alphanumeric ranges.
host = None
for matching in ['ipv4', 'ipv6', 'hostname']:
m = patterns[matching].match(address)
if m:
host = address
continue
# If it isn't any of the above, we don't understand it.
if not host:
raise AnsibleError("Not a valid network hostname: %s" % address)
# If we get to this point, we know that any included ranges are valid.
# If the caller is prepared to handle them, all is well.
# Otherwise we treat it as a parse failure.
if not allow_ranges and '[' in host:
raise AnsibleParserError("Detected range in host but was asked to ignore ranges")
return (host, port)
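# Illustrative sketch (not part of the original module): a small helper that
# exercises parse_address() on representative inputs. The sample addresses are
# documentation values (e.g. 192.0.2.1, 2001:db8::1) chosen only for
# demonstration; range expressions require allow_ranges=True.
def _demo_parse_address():
    for text in ('example.com:8080', '192.0.2.1', '[2001:db8::1]:22'):
        host, port = parse_address(text)
        print('%-22s -> host=%r port=%r' % (text, host, port))
    host, port = parse_address('web[1:3].example.com', allow_ranges=True)
    print('%-22s -> host=%r port=%r' % ('web[1:3].example.com', host, port))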
| gpl-3.0 |
lucychambers/lucychambers.github.io | .bundle/ruby/2.0.0/gems/pygments.rb-0.6.2/vendor/pygments-main/ez_setup.py | 164 | 12155 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
import platform
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "1.4.2"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _check_call_py24(cmd, *args, **kwargs):
res = subprocess.call(cmd, *args, **kwargs)
class CalledProcessError(Exception):
pass
if not res == 0:
msg = "Command '%s' return non-zero exit status %d" % (cmd, res)
raise CalledProcessError(msg)
vars(subprocess).setdefault('check_call', _check_call_py24)
def _install(tarball, install_args=()):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
del sys.modules['pkg_resources']
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15):
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
was_imported = 'pkg_resources' in sys.modules or \
'setuptools' in sys.modules
try:
import pkg_resources
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("setuptools>=" + version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
"The required version of setuptools (>=%s) is not available,\n"
"and can't be installed while this script is running. Please\n"
"install a more recent version first, using\n"
"'easy_install -U setuptools'."
"\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return _do_download(version, download_base, to_dir,
download_delay)
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
cmd = [
'powershell',
'-Command',
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
]
_clean_check(cmd, target)
def has_powershell():
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
cmd = ['curl', url, '--silent', '--output', target]
_clean_check(cmd, target)
def has_curl():
cmd = ['curl', '--version']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
cmd = ['wget', url, '--quiet', '--output-document', target]
_clean_check(cmd, target)
def has_wget():
cmd = ['wget', '--version']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
"""
Use Python to download the file, even though it cannot authenticate the
connection.
"""
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
src = dst = None
try:
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(target, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
download_file_insecure.viable = lambda: True
def get_best_downloader():
downloaders = [
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
]
for dl in downloaders:
if dl.viable():
return dl
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15,
downloader_factory=get_best_downloader):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
tgz_name = "setuptools-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
if not os.path.exists(saveto): # Avoid repeated downloads
log.warn("Downloading %s", url)
downloader = downloader_factory()
downloader(url, saveto)
return os.path.realpath(saveto)
def _extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
options, args = parser.parse_args()
# positional arguments are ignored
return options
def main(version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
options = _parse_args()
tarball = download_setuptools(download_base=options.download_base,
downloader_factory=options.downloader_factory)
return _install(tarball, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 |
bogus-py/graphite-api | docs/conf.py | 8 | 2188 | #!/usr/bin/env python3
# coding: utf-8
import os
import re
import sys
import sphinx_rtd_theme
from sphinx.ext import autodoc
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
extensions = [
'sphinx.ext.autodoc',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Graphite-API'
copyright = u'2014, Bruno Renié'
version = '1.0.1'
release = '1.0.1'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
htmlhelp_basename = 'Graphite-APIdoc'
latex_elements = {
}
latex_documents = [
('index', 'Graphite-API.tex', 'Graphite-API Documentation',
'Bruno Renié', 'manual'),
]
man_pages = [
('index', 'graphite-api', 'Graphite-API Documentation',
['Bruno Renié'], 1)
]
texinfo_documents = [
('index', 'Graphite-API', 'Graphite-API Documentation',
'Bruno Renié', 'Graphite-API', 'One line description of project.',
'Miscellaneous'),
]
class RenderFunctionDocumenter(autodoc.FunctionDocumenter):
priority = 10
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return autodoc.FunctionDocumenter.can_document_member(
member, membername, isattr, parent
) and parent.name == 'graphite_api.functions'
def format_args(self):
args = super(RenderFunctionDocumenter, self).format_args()
if args is not None:
return re.sub('requestContext, ', '', args)
def setup(app):
app.add_autodocumenter(RenderFunctionDocumenter)
add_module_names = False
class Mock(object):
__all__ = []
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
for mod_name in ['cairocffi']:
sys.modules[mod_name] = Mock()
| apache-2.0 |
friedrich420/Note-4-TMO-AEL-Kernel-Lollipop-Source | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
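# Example of the (min, max, avg, count) bookkeeping above (values traced by hand):
#
#   stats = {}
#   add_stats(stats, "read", 10)   # stats["read"] == (10, 10, 10, 1)
#   add_stats(stats, "read", 30)   # stats["read"] == (10, 30, 20, 2)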
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
tobinjt/Flexget | flexget/components/series/metainfo_series.py | 4 | 2356 | from __future__ import unicode_literals, division, absolute_import
import logging
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget.event import event
from . import series as plugin_series
from flexget import plugin
try:
# NOTE: Importing other plugins is discouraged!
from flexget.components.parsing.parsers import parser_common as plugin_parser_common
except ImportError:
raise plugin.DependencyError(issued_by=__name__, missing='parser_common')
log = logging.getLogger('metainfo_series')
class MetainfoSeries(object):
"""
Check if entry appears to be a series, and populate series info if so.
"""
schema = {'type': 'boolean'}
# Run after series plugin so we don't try to re-parse it's entries
@plugin.priority(120)
def on_task_metainfo(self, task, config):
# Don't run if we are disabled
if config is False:
return
for entry in task.entries:
# If series plugin already parsed this, don't touch it.
if entry.get('id'):
continue
self.guess_entry(entry)
def guess_entry(self, entry, allow_seasonless=False, config=None):
"""
Populates series_* fields for entries that are successfully parsed.
:param dict config: A series config to be used. This will also cause 'path' and 'set' fields to be populated.
"""
if entry.get('series_parser') and entry['series_parser'].valid:
# Return true if we already parsed this, false if series plugin parsed it
return True
identified_by = 'auto'
if config and 'identified_by' in config:
identified_by = config['identified_by']
parsed = plugin.get('parsing', self).parse_series(
data=entry['title'], identified_by=identified_by, allow_seasonless=allow_seasonless
)
if parsed and parsed.valid:
parsed.name = plugin_parser_common.normalize_name(
plugin_parser_common.remove_dirt(parsed.name)
)
plugin_series.populate_entry_fields(entry, parsed, config)
entry['series_guessed'] = True
return True
return False
@event('plugin.register')
def register_plugin():
plugin.register(MetainfoSeries, 'metainfo_series', api_ver=2)
| mit |
TeXitoi/navitia | source/jormungandr/jormungandr/protobuf_to_dict.py | 12 | 3072 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from google.protobuf.descriptor import FieldDescriptor
__all__ = ["protobuf_to_dict", "TYPE_CALLABLE_MAP"]
TYPE_CALLABLE_MAP = {
FieldDescriptor.TYPE_DOUBLE: float,
FieldDescriptor.TYPE_FLOAT: float,
FieldDescriptor.TYPE_INT32: int,
FieldDescriptor.TYPE_INT64: long,
FieldDescriptor.TYPE_UINT32: int,
FieldDescriptor.TYPE_UINT64: long,
FieldDescriptor.TYPE_SINT32: int,
FieldDescriptor.TYPE_SINT64: long,
FieldDescriptor.TYPE_FIXED32: int,
FieldDescriptor.TYPE_FIXED64: long,
FieldDescriptor.TYPE_SFIXED32: int,
FieldDescriptor.TYPE_SFIXED64: long,
FieldDescriptor.TYPE_BOOL: bool,
FieldDescriptor.TYPE_STRING: unicode,
FieldDescriptor.TYPE_BYTES: lambda b: b.encode("base64"),
FieldDescriptor.TYPE_ENUM: int,
}
def repeated(type_callable):
return lambda value_list: [type_callable(value) for value in value_list]
def enum_label_name(field, value):
return field.enum_type.values_by_number[int(value)].name
def protobuf_to_dict(pb, type_callable_map=TYPE_CALLABLE_MAP,
use_enum_labels=False):
# recursion!
type_callable_map[FieldDescriptor.TYPE_MESSAGE] = \
lambda pb: protobuf_to_dict(pb, type_callable_map, use_enum_labels)
result_dict = {}
for field, value in pb.ListFields():
if field.type not in type_callable_map:
raise TypeError("Field %s.%s has unrecognised type id %d" % (
pb.__class__.__name__, field.name, field.type))
type_callable = type_callable_map[field.type]
if use_enum_labels and field.type == FieldDescriptor.TYPE_ENUM:
type_callable = lambda value: enum_label_name(field, value)
if field.label == FieldDescriptor.LABEL_REPEATED:
type_callable = repeated(type_callable)
result_dict[field.name] = type_callable(value)
return result_dict
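# Minimal usage sketch; "example_pb2.Person" is a hypothetical generated class,
# not part of navitia:
#
#   from example_pb2 import Person
#   p = Person(name="Ada", id=1)
#   protobuf_to_dict(p)                        # -> {'name': u'Ada', 'id': 1}
#   protobuf_to_dict(p, use_enum_labels=True)  # enum fields become label strings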
| agpl-3.0 |
nitzmahone/ansible | lib/ansible/plugins/lookup/url.py | 36 | 3469 | # (c) 2015, Brian Coca <[email protected]>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: url
author: Brian Coca (@bcoca)
version_added: "1.9"
short_description: return contents from URL
description:
- Returns the content of the URL requested to be used as data in play.
options:
_terms:
description: urls to query
validate_certs:
description: Flag to control SSL certificate validation
type: boolean
default: True
split_lines:
description: Flag to control if content is returned as a list of lines or as a single text blob
type: boolean
default: True
use_proxy:
description: Flag to control if the lookup will observe HTTP proxy environment variables when present.
type: boolean
default: True
username:
description: Username to use for HTTP authentication.
type: string
default: None
version_added: "2.8"
password:
description: Password to use for HTTP authentication.
type: string
default: None
version_added: "2.8"
"""
EXAMPLES = """
- name: url lookup splits lines by default
debug: msg="{{item}}"
loop: "{{ lookup('url', 'https://github.com/gremlin.keys', wantlist=True) }}"
- name: display ip ranges
debug: msg="{{ lookup('url', 'https://ip-ranges.amazonaws.com/ip-ranges.json', split_lines=False) }}"
- name: url lookup using authentication
debug: msg="{{ lookup('url', 'https://some.private.site.com/file.txt', username='bob', password='hunter2') }}"
"""
RETURN = """
_list:
description: list of list of lines or content of url(s)
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
self.set_options(direct=kwargs)
ret = []
for term in terms:
display.vvvv("url lookup connecting to %s" % term)
try:
response = open_url(term, validate_certs=self.get_option('validate_certs'),
use_proxy=self.get_option('use_proxy'),
url_username=self.get_option('username'),
url_password=self.get_option('password'))
except HTTPError as e:
raise AnsibleError("Received HTTP error for %s : %s" % (term, to_native(e)))
except URLError as e:
raise AnsibleError("Failed lookup url for %s : %s" % (term, to_native(e)))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, to_native(e)))
except ConnectionError as e:
raise AnsibleError("Error connecting to %s: %s" % (term, to_native(e)))
if self.get_option('split_lines'):
for line in response.read().splitlines():
ret.append(to_text(line))
else:
ret.append(to_text(response.read()))
return ret
| gpl-3.0 |
saurabh6790/med_test_lib | webnotes/utils/__init__.py | 22 | 23735 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# util __init__.py
from __future__ import unicode_literals
from webnotes import conf
import webnotes
no_value_fields = ['Section Break', 'Column Break', 'HTML', 'Table', 'FlexTable',
'Button', 'Image', 'Graph']
default_fields = ['doctype', 'name', 'owner', 'creation', 'modified', 'modified_by',
'parent', 'parentfield', 'parenttype', 'idx', 'docstatus']
# used in import_docs.py
# TODO: deprecate it
def getCSVelement(v):
"""
	Returns the CSV value of `v`. For example:
* apple becomes "apple"
* hi"there becomes "hi""there"
"""
v = cstr(v)
if not v: return ''
if (',' in v) or ('\n' in v) or ('"' in v):
if '"' in v: v = v.replace('"', '""')
return '"'+v+'"'
else: return v or ''
def get_fullname(profile):
"""get the full name (first name + last name) of the user from Profile"""
p = webnotes.conn.sql("""select first_name, last_name from `tabProfile`
where name=%s""", profile, as_dict=1)
if p:
profile = " ".join(filter(None,
[p[0].get('first_name'), p[0].get('last_name')])) or profile
return profile
def get_formatted_email(user):
"""get email id of user formatted as: John Doe <[email protected]>"""
if user == "Administrator":
return user
from email.utils import formataddr
fullname = get_fullname(user)
return formataddr((fullname, user))
def extract_email_id(email):
"""fetch only the email part of the email id"""
from email.utils import parseaddr
fullname, email_id = parseaddr(email)
if isinstance(email_id, basestring) and not isinstance(email_id, unicode):
email_id = email_id.decode("utf-8", "ignore")
return email_id
def validate_email_add(email_str):
"""Validates the email string"""
email = extract_email_id(email_str)
import re
return re.match("[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?", email.lower())
def get_request_site_address(full_address=False):
"""get app url from request"""
import os
host_name = conf.host_name
if not host_name:
if webnotes.request:
protocol = 'https' == webnotes.get_request_header('X-Forwarded-Proto', "") and 'https://' or 'http://'
host_name = protocol + webnotes.request.host
elif webnotes.local.site:
return "http://" + webnotes.local.site
else:
return "http://localhost"
if full_address:
return host_name + webnotes.get_request_header("REQUEST_URI", "")
else:
return host_name
def random_string(length):
"""generate a random string"""
import string
from random import choice
return ''.join([choice(string.letters + string.digits) for i in range(length)])
def load_json(arg):
# already a dictionary?
if not isinstance(arg, basestring):
return arg
import json
return json.loads(arg, encoding='utf-8')
# Get Traceback
# ==============================================================================
def getTraceback():
"""
Returns the traceback of the Exception
"""
import sys, traceback
exc_type, value, tb = sys.exc_info()
trace_list = traceback.format_tb(tb, None) + \
traceback.format_exception_only(exc_type, value)
body = "Traceback (innermost last):\n" + "%-20s %s" % \
(unicode((b"").join(trace_list[:-1]), 'utf-8'), unicode(trace_list[-1], 'utf-8'))
if webnotes.logger:
webnotes.logger.error('Db:'+(webnotes.conn and webnotes.conn.cur_db_name or '') \
+ ' - ' + body)
return body
def log(event, details):
webnotes.logger.info(details)
# datetime functions
def getdate(string_date):
"""
	Converts string date (yyyy-mm-dd) to datetime.date object
"""
import datetime
if isinstance(string_date, datetime.date):
return string_date
elif isinstance(string_date, datetime.datetime):
		return string_date.date()
if " " in string_date:
string_date = string_date.split(" ")[0]
return datetime.datetime.strptime(string_date, "%Y-%m-%d").date()
def add_to_date(date, years=0, months=0, days=0):
"""Adds `days` to the given date"""
format = isinstance(date, basestring)
if date:
date = getdate(date)
else:
raise Exception, "Start date required"
from dateutil.relativedelta import relativedelta
date += relativedelta(years=years, months=months, days=days)
if format:
return date.strftime("%Y-%m-%d")
else:
return date
def add_days(date, days):
return add_to_date(date, days=days)
def add_months(date, months):
return add_to_date(date, months=months)
def add_years(date, years):
return add_to_date(date, years=years)
def date_diff(string_ed_date, string_st_date):
return (getdate(string_ed_date) - getdate(string_st_date)).days
def time_diff(string_ed_date, string_st_date):
return get_datetime(string_ed_date) - get_datetime(string_st_date)
def time_diff_in_seconds(string_ed_date, string_st_date):
return time_diff(string_ed_date, string_st_date).seconds
def time_diff_in_hours(string_ed_date, string_st_date):
return round(float(time_diff(string_ed_date, string_st_date).seconds) / 3600, 6)
def now_datetime():
from datetime import datetime
return convert_utc_to_user_timezone(datetime.utcnow())
def get_user_time_zone():
if getattr(webnotes.local, "user_time_zone", None) is None:
webnotes.local.user_time_zone = webnotes.cache().get_value("time_zone")
if not webnotes.local.user_time_zone:
webnotes.local.user_time_zone = webnotes.conn.get_value('Control Panel', None, 'time_zone') \
or 'Asia/Calcutta'
webnotes.cache().set_value("time_zone", webnotes.local.user_time_zone)
return webnotes.local.user_time_zone
def convert_utc_to_user_timezone(utc_timestamp):
from pytz import timezone, UnknownTimeZoneError
utcnow = timezone('UTC').localize(utc_timestamp)
try:
return utcnow.astimezone(timezone(get_user_time_zone()))
except UnknownTimeZoneError:
return utcnow
def now():
"""return current datetime as yyyy-mm-dd hh:mm:ss"""
if getattr(webnotes.local, "current_date", None):
return getdate(webnotes.local.current_date).strftime("%Y-%m-%d") + " " + \
now_datetime().strftime('%H:%M:%S')
else:
return now_datetime().strftime('%Y-%m-%d %H:%M:%S')
def nowdate():
"""return current date as yyyy-mm-dd"""
return now_datetime().strftime('%Y-%m-%d')
def today():
return nowdate()
def nowtime():
"""return current time in hh:mm"""
return now_datetime().strftime('%H:%M')
def get_first_day(dt, d_years=0, d_months=0):
"""
Returns the first day of the month for the date specified by date object
Also adds `d_years` and `d_months` if specified
"""
import datetime
dt = getdate(dt)
# d_years, d_months are "deltas" to apply to dt
overflow_years, month = divmod(dt.month + d_months - 1, 12)
year = dt.year + d_years + overflow_years
return datetime.date(year, month + 1, 1)
def get_last_day(dt):
"""
Returns last day of the month using:
`get_first_day(dt, 0, 1) + datetime.timedelta(-1)`
"""
import datetime
return get_first_day(dt, 0, 1) + datetime.timedelta(-1)
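# Example values (hand-traced sketch):
#   get_first_day("2013-02-15")        -> datetime.date(2013, 2, 1)
#   get_first_day("2013-02-15", 0, 1)  -> datetime.date(2013, 3, 1)
#   get_last_day("2013-02-15")         -> datetime.date(2013, 2, 28)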
def get_datetime(datetime_str):
from datetime import datetime
if isinstance(datetime_str, datetime):
return datetime_str.replace(microsecond=0, tzinfo=None)
return datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
def get_datetime_str(datetime_obj):
if isinstance(datetime_obj, basestring):
datetime_obj = get_datetime(datetime_obj)
return datetime_obj.strftime('%Y-%m-%d %H:%M:%S')
def formatdate(string_date=None):
"""
	Converts the given string date to :data:`user_format`
User format specified in :term:`Control Panel`
Examples:
* dd-mm-yyyy
* mm-dd-yyyy
* dd/mm/yyyy
"""
if string_date:
string_date = getdate(string_date)
else:
string_date = now_datetime().date()
if getattr(webnotes.local, "user_format", None) is None:
webnotes.local.user_format = webnotes.conn.get_default("date_format")
out = webnotes.local.user_format
return out.replace("dd", string_date.strftime("%d"))\
.replace("mm", string_date.strftime("%m"))\
.replace("yyyy", string_date.strftime("%Y"))
def global_date_format(date):
"""returns date as 1 January 2012"""
formatted_date = getdate(date).strftime("%d %B %Y")
return formatted_date.startswith("0") and formatted_date[1:] or formatted_date
def dict_to_str(args, sep='&'):
"""
Converts a dictionary to URL
"""
import urllib
t = []
for k in args.keys():
t.append(str(k)+'='+urllib.quote(str(args[k] or '')))
return sep.join(t)
def timestamps_equal(t1, t2):
"""Returns true if same the two string timestamps are same"""
scrub = lambda x: x.replace(':', ' ').replace('-',' ').split()
t1, t2 = scrub(t1), scrub(t2)
if len(t1) != len(t2):
return
for i in range(len(t1)):
if t1[i]!=t2[i]:
return
return 1
def has_common(l1, l2):
"""Returns truthy value if there are common elements in lists l1 and l2"""
return set(l1) & set(l2)
def flt(s, precision=None):
"""Convert to float (ignore commas)"""
if isinstance(s, basestring):
s = s.replace(',','')
try:
num = float(s)
if precision is not None:
num = _round(num, precision)
except Exception:
num = 0
return num
def cint(s):
"""Convert to integer"""
try: num = int(float(s))
except: num = 0
return num
def cstr(s):
if isinstance(s, unicode):
return s
elif s==None:
return ''
elif isinstance(s, basestring):
return unicode(s, 'utf-8')
else:
return unicode(s)
def _round(num, precision=0):
"""round method for round halfs to nearest even algorithm"""
precision = cint(precision)
multiplier = 10 ** precision
# avoid rounding errors
num = round(num * multiplier if precision else num, 8)
import math
floor = math.floor(num)
decimal_part = num - floor
if decimal_part == 0.5:
num = floor if (floor % 2 == 0) else floor + 1
else:
num = round(num)
return (num / multiplier) if precision else num
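# "Round half to even" in practice (values traced by hand):
#   _round(2.5)       -> 2.0    # ties go to the nearest even integer
#   _round(3.5)       -> 4.0
#   _round(0.445, 2)  -> 0.44   # 44.5 rounds to the even 44 at scale 100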
def encode(obj, encoding="utf-8"):
if isinstance(obj, list):
out = []
for o in obj:
if isinstance(o, unicode):
out.append(o.encode(encoding))
else:
out.append(o)
return out
elif isinstance(obj, unicode):
return obj.encode(encoding)
else:
return obj
def parse_val(v):
"""Converts to simple datatypes from SQL query results"""
import datetime
if isinstance(v, (datetime.date, datetime.datetime)):
v = unicode(v)
elif isinstance(v, datetime.timedelta):
v = ":".join(unicode(v).split(":")[:2])
elif isinstance(v, long):
v = int(v)
return v
def fmt_money(amount, precision=None, currency=None):
"""
Convert to string with commas for thousands, millions etc
"""
number_format = webnotes.conn.get_default("number_format") or "#,###.##"
decimal_str, comma_str, precision = get_number_format_info(number_format)
amount = '%.*f' % (precision, flt(amount))
if amount.find('.') == -1:
decimals = ''
else:
decimals = amount.split('.')[1]
parts = []
minus = ''
if flt(amount) < 0:
minus = '-'
amount = cstr(abs(flt(amount))).split('.')[0]
if len(amount) > 3:
parts.append(amount[-3:])
amount = amount[:-3]
val = number_format=="#,##,###.##" and 2 or 3
while len(amount) > val:
parts.append(amount[-val:])
amount = amount[:-val]
parts.append(amount)
parts.reverse()
amount = comma_str.join(parts) + (precision and (decimal_str + decimals) or "")
amount = minus + amount
if currency:
symbol = webnotes.conn.get_value("Currency", currency, "symbol")
if symbol:
amount = symbol + " " + amount
return amount
number_format_info = {
"#.###": ("", ".", 0),
"#,###": ("", ",", 0),
"#,###.##": (".", ",", 2),
"#,##,###.##": (".", ",", 2),
"#.###,##": (",", ".", 2),
"# ###.##": (".", " ", 2),
"#,###.###": (".", ",", 3),
}
def get_number_format_info(format):
return number_format_info.get(format) or (".", ",", 2)
#
# convert currency to words
#
def money_in_words(number, main_currency = None, fraction_currency=None):
"""
Returns string in words with currency and fraction currency.
"""
d = get_defaults()
if not main_currency:
main_currency = d.get('currency', 'INR')
if not fraction_currency:
fraction_currency = webnotes.conn.get_value("Currency", main_currency, "fraction") or "Cent"
n = "%.2f" % flt(number)
main, fraction = n.split('.')
if len(fraction)==1: fraction += '0'
number_format = webnotes.conn.get_value("Currency", main_currency, "number_format") or \
webnotes.conn.get_default("number_format") or "#,###.##"
in_million = True
if number_format == "#,##,###.##": in_million = False
out = main_currency + ' ' + in_words(main, in_million).title()
if cint(fraction):
out = out + ' and ' + in_words(fraction, in_million).title() + ' ' + fraction_currency
return out + ' only.'
#
# convert number to words
#
def in_words(integer, in_million=True):
"""
Returns string in words for the given integer.
"""
n=int(integer)
known = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten',
11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen',
19: 'nineteen', 20: 'twenty', 30: 'thirty', 40: 'forty', 50: 'fifty', 60: 'sixty', 70: 'seventy', 80: 'eighty', 90: 'ninety'}
def psn(n, known, xpsn):
import sys;
if n in known: return known[n]
bestguess, remainder = str(n), 0
if n<=20:
webnotes.errprint(sys.stderr)
webnotes.errprint(n)
webnotes.errprint("How did this happen?")
assert 0
elif n < 100:
bestguess= xpsn((n//10)*10, known, xpsn) + '-' + xpsn(n%10, known, xpsn)
return bestguess
elif n < 1000:
bestguess= xpsn(n//100, known, xpsn) + ' ' + 'hundred'
remainder = n%100
else:
if in_million:
if n < 1000000:
bestguess= xpsn(n//1000, known, xpsn) + ' ' + 'thousand'
remainder = n%1000
elif n < 1000000000:
bestguess= xpsn(n//1000000, known, xpsn) + ' ' + 'million'
remainder = n%1000000
else:
bestguess= xpsn(n//1000000000, known, xpsn) + ' ' + 'billion'
remainder = n%1000000000
else:
if n < 100000:
bestguess= xpsn(n//1000, known, xpsn) + ' ' + 'thousand'
remainder = n%1000
elif n < 10000000:
bestguess= xpsn(n//100000, known, xpsn) + ' ' + 'lakh'
remainder = n%100000
else:
bestguess= xpsn(n//10000000, known, xpsn) + ' ' + 'crore'
remainder = n%10000000
if remainder:
if remainder >= 100:
comma = ','
else:
comma = ''
return bestguess + comma + ' ' + xpsn(remainder, known, xpsn)
else:
return bestguess
return psn(n, known, psn)
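# Illustrative conversions with the default in_million=True (traced by hand):
#   in_words(90)    -> 'ninety'
#   in_words(1234)  -> 'one thousand, two hundred thirty-four'
# money_in_words() wraps this with the currency and fraction names from the
# database, so its exact output depends on the configured Currency records.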
# Get Defaults
# ==============================================================================
def get_defaults(key=None):
"""
Get dictionary of default values from the :term:`Control Panel`, or a value if key is passed
"""
return webnotes.conn.get_defaults(key)
def set_default(key, val):
"""
Set / add a default value to :term:`Control Panel`
"""
return webnotes.conn.set_default(key, val)
def remove_blanks(d):
"""
Returns d with empty ('' or None) values stripped
"""
empty_keys = []
for key in d:
if d[key]=='' or d[key]==None:
# del d[key] raises runtime exception, using a workaround
empty_keys.append(key)
for key in empty_keys:
del d[key]
return d
def pprint_dict(d, level=1, no_blanks=True):
"""
Pretty print a dictionary with indents
"""
if no_blanks:
remove_blanks(d)
# make indent
indent, ret = '', ''
for i in range(0,level): indent += '\t'
# add lines
comment, lines = '', []
kl = d.keys()
kl.sort()
# make lines
for key in kl:
if key != '##comment':
tmp = {key: d[key]}
lines.append(indent + str(tmp)[1:-1] )
# add comment string
if '##comment' in kl:
ret = ('\n' + indent) + '# ' + d['##comment'] + '\n'
# open
ret += indent + '{\n'
# lines
ret += indent + ',\n\t'.join(lines)
# close
ret += '\n' + indent + '}'
return ret
def get_common(d1,d2):
"""
returns (list of keys) the common part of two dicts
"""
return [p for p in d1 if p in d2 and d1[p]==d2[p]]
def get_common_dict(d1, d2):
"""
return common dictionary of d1 and d2
"""
ret = {}
for key in d1:
if key in d2 and d2[key]==d1[key]:
ret[key] = d1[key]
return ret
def get_diff_dict(d1, d2):
"""
	return the difference of d1 and d2 (keys present in d2 but not in d1)
"""
diff_keys = set(d2.keys()).difference(set(d1.keys()))
ret = {}
for d in diff_keys: ret[d] = d2[d]
return ret
def get_file_timestamp(fn):
"""
Returns timestamp of the given file
"""
import os
from webnotes.utils import cint
try:
return str(cint(os.stat(fn).st_mtime))
except OSError, e:
if e.args[0]!=2:
raise
else:
return None
# to be deprecated
def make_esc(esc_chars):
"""
Function generator for Escaping special characters
"""
return lambda s: ''.join(['\\' + c if c in esc_chars else c for c in s])
# esc / unescape characters -- used for command line
def esc(s, esc_chars):
"""
Escape special characters
"""
if not s:
return ""
for c in esc_chars:
esc_str = '\\' + c
s = s.replace(c, esc_str)
return s
def unesc(s, esc_chars):
"""
UnEscape special characters
"""
for c in esc_chars:
esc_str = '\\' + c
s = s.replace(esc_str, c)
return s
def is_html(text):
out = False
for key in ["<br>", "<p", "<img", "<div"]:
if key in text:
out = True
break
return out
def strip_html(text):
"""
removes anything enclosed in and including <>
"""
import re
return re.compile(r'<.*?>').sub('', text)
def escape_html(text):
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
return "".join(html_escape_table.get(c,c) for c in text)
def get_doctype_label(dt=None):
"""
Gets label of a doctype
"""
if dt:
res = webnotes.conn.sql("""\
SELECT name, dt_label FROM `tabDocType Label`
WHERE name=%s""", dt)
return res and res[0][0] or dt
else:
res = webnotes.conn.sql("SELECT name, dt_label FROM `tabDocType Label`")
dt_label_dict = {}
for r in res:
dt_label_dict[r[0]] = r[1]
return dt_label_dict
def get_label_doctype(label):
"""
Gets doctype from its label
"""
res = webnotes.conn.sql("""\
SELECT name FROM `tabDocType Label`
WHERE dt_label=%s""", label)
return res and res[0][0] or label
def pretty_date(iso_datetime):
"""
	Takes an ISO time and returns a string describing how
	long ago that date was.
Ported from PrettyDate by John Resig
"""
if not iso_datetime: return ''
from datetime import datetime
import math
if isinstance(iso_datetime, basestring):
iso_datetime = datetime.strptime(iso_datetime, '%Y-%m-%d %H:%M:%S')
now_dt = datetime.strptime(now(), '%Y-%m-%d %H:%M:%S')
dt_diff = now_dt - iso_datetime
# available only in python 2.7+
# dt_diff_seconds = dt_diff.total_seconds()
dt_diff_seconds = dt_diff.days * 86400.0 + dt_diff.seconds
dt_diff_days = math.floor(dt_diff_seconds / 86400.0)
	# different cases
if dt_diff_seconds < 60.0:
return 'just now'
elif dt_diff_seconds < 120.0:
return '1 minute ago'
elif dt_diff_seconds < 3600.0:
return '%s minutes ago' % cint(math.floor(dt_diff_seconds / 60.0))
elif dt_diff_seconds < 7200.0:
return '1 hour ago'
elif dt_diff_seconds < 86400.0:
return '%s hours ago' % cint(math.floor(dt_diff_seconds / 3600.0))
elif dt_diff_days == 1.0:
return 'Yesterday'
elif dt_diff_days < 7.0:
return '%s days ago' % cint(dt_diff_days)
elif dt_diff_days < 31.0:
return '%s week(s) ago' % cint(math.ceil(dt_diff_days / 7.0))
elif dt_diff_days < 365.0:
return '%s months ago' % cint(math.ceil(dt_diff_days / 30.0))
else:
return 'more than %s year(s) ago' % cint(math.floor(dt_diff_days / 365.0))
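# Illustrative outputs (assuming now() is 2013-01-15 12:00:00; a sketch only,
# since now() needs the site context):
#   pretty_date('2013-01-15 11:59:30')  -> 'just now'
#   pretty_date('2013-01-15 09:00:00')  -> '3 hours ago'
#   pretty_date('2013-01-14 12:00:00')  -> 'Yesterday'
#   pretty_date('2012-12-20 12:00:00')  -> '4 week(s) ago'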
def execute_in_shell(cmd, verbose=0):
# using Popen instead of os.system - as recommended by python docs
from subprocess import Popen
import tempfile
with tempfile.TemporaryFile() as stdout:
with tempfile.TemporaryFile() as stderr:
p = Popen(cmd, shell=True, stdout=stdout, stderr=stderr)
p.wait()
stdout.seek(0)
out = stdout.read()
stderr.seek(0)
err = stderr.read()
if verbose:
if err: print err
if out: print out
return err, out
def comma_or(some_list):
return comma_sep(some_list, " or ")
def comma_and(some_list):
return comma_sep(some_list, " and ")
def comma_sep(some_list, sep):
if isinstance(some_list, (list, tuple)):
# list(some_list) is done to preserve the existing list
some_list = [unicode(s) for s in list(some_list)]
if not some_list:
return ""
elif len(some_list) == 1:
return some_list[0]
else:
some_list = ["'%s'" % s for s in some_list]
return ", ".join(some_list[:-1]) + sep + some_list[-1]
else:
return some_list
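# Illustrative values (traced by hand):
#   comma_and(["a", "b", "c"])  -> "'a', 'b' and 'c'"
#   comma_or(["a", "b"])        -> "'a' or 'b'"
#   comma_and(["a"])            -> "a"    # single items are returned unquoted
#   comma_and("a")              -> "a"    # non-list input is passed through as-is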
def filter_strip_join(some_list, sep):
"""given a list, filter None values, strip spaces and join"""
return (cstr(sep)).join((cstr(a).strip() for a in filter(None, some_list)))
def get_path(*path, **kwargs):
base = kwargs.get('base')
if not base:
base = get_base_path()
import os
return os.path.join(base, *path)
def get_base_path():
import conf
import os
return os.path.dirname(os.path.abspath(conf.__file__))
def get_site_base_path(sites_dir=None, hostname=None):
if not sites_dir:
sites_dir = conf.sites_dir
if not hostname:
hostname = conf.site
if not (sites_dir and hostname):
return get_base_path()
import os
return os.path.join(sites_dir, hostname)
def get_site_path(*path):
return get_path(base=get_site_base_path(), *path)
def get_files_path():
return get_site_path(webnotes.conf.files_path)
def get_backups_path():
return get_site_path(webnotes.conf.backup_path)
def get_url(uri=None):
url = get_request_site_address()
if not url or "localhost" in url:
subdomain = webnotes.conn.get_value("Website Settings", "Website Settings",
"subdomain")
if subdomain:
if "http" not in subdomain:
url = "http://" + subdomain
if uri:
import urllib
url = urllib.basejoin(url, uri)
return url
def get_url_to_form(doctype, name, base_url=None, label=None):
if not base_url:
base_url = get_url()
if not label: label = name
return """<a href="%(base_url)s/app.html#!Form/%(doctype)s/%(name)s">%(label)s</a>""" % locals()
def encode_dict(d, encoding="utf-8"):
for key in d:
if isinstance(d[key], basestring) and isinstance(d[key], unicode):
d[key] = d[key].encode(encoding)
return d
def decode_dict(d, encoding="utf-8"):
for key in d:
if isinstance(d[key], basestring) and not isinstance(d[key], unicode):
d[key] = d[key].decode(encoding, "ignore")
return d
import operator
operator_map = {
# startswith
"^": lambda (a, b): (a or "").startswith(b),
# in or not in a list
"in": lambda (a, b): operator.contains(b, a),
"not in": lambda (a, b): not operator.contains(b, a),
# comparison operators
"=": lambda (a, b): operator.eq(a, b),
"!=": lambda (a, b): operator.ne(a, b),
">": lambda (a, b): operator.gt(a, b),
"<": lambda (a, b): operator.lt(a, b),
">=": lambda (a, b): operator.ge(a, b),
"<=": lambda (a, b): operator.le(a, b),
"not None": lambda (a, b): a and True or False,
"None": lambda (a, b): (not a) and True or False
}
def compare(val1, condition, val2):
if condition in operator_map:
return operator_map[condition]((val1, val2))
return False
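# Illustrative checks against operator_map (traced by hand):
#   compare("foobar", "^", "foo")   -> True    # startswith
#   compare(3, ">", 2)              -> True
#   compare("a", "in", ["a", "b"])  -> True
#   compare(None, "None", None)     -> True
#   compare(1, "===", 1)            -> False   # unknown conditions return False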
def get_site_name(hostname):
return hostname.split(':')[0]
def get_disk_usage():
"""get disk usage of files folder"""
import os
files_path = get_files_path()
if not os.path.exists(files_path):
return 0
err, out = execute_in_shell("du -hsm {files_path}".format(files_path=files_path))
return cint(out.split("\n")[-2].split("\t")[0])
def expand_partial_links(html):
import re
url = get_url()
if not url.endswith("/"): url += "/"
return re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?!http)[^\'" >]+)([\'"]?)',
'\g<1>\g<2>{}\g<3>\g<4>'.format(url), html)
| mit |
CompPhysics/ComputationalPhysics | doc/Programs/LecturePrograms/programs/PDE/python/2dwave/pythonmovie.py | 6 | 2018 | #!/usr/bin/env python
# This script reads in data from file with the solutions of the
# 2dim wave equation. The data are organized as
# time
# l, i, j, u(i,j) where l is the time index t_l, i refers to x_i and j to y_j
# At the end it converts a series of png files to a movie
# file movie.gif. You can run this movie file using the ImageMagick
# software animate as - animate movie.gif et voila', Hollywood next
# It creates a movie of the time evolution with the scitools.easyviz library.
# To fetch this addition to python go to the link
# http://code.google.com/p/scitools/wiki/Installation
# This additional tool is the same as that used in INF1100 and should
# be installed on most machines.
from numpy import *
from scitools.easyviz import *
import sys, os
try:
inputfilename = sys.argv[1]
except:
print "Usage of this script", sys.argv[0], "inputfile"; sys.exit(1)
# Read file with data
ifile = open(inputfilename)
lines = ifile.readlines()
ifile.close()
# Fixed Lengths used in other function to set up the grids.
start = lines[0].split()
stop = lines[-1].split()
Lx = int(start[1]) + 1; nx = int(stop[1]) + 1
Ly = int(start[2]) + 1; ny = int(stop[2]) + 1
ntime = int(stop[0])
x, y = ndgrid(linspace(0, Lx, nx), linspace(0, Ly, ny), sparse=False)
ifile = open(inputfilename)
plotnr = 0
u = zeros([nx, ny])
# Loop over time steps
for l_ind in xrange(1, ntime + 1):
for i_ind in range(0, nx):
for j_ind in range(0, ny):
elements = []
while len(elements) < 4:
elements = ifile.readline().split()
l, i, j, value = elements
if l_ind != int(l):
raise IndexError, 'l_ind=%d, l=%d -> l_ind != l' %(l_ind, int(l))
u[int(i), int(j)] = float(value)
plotnr += 1
mesh(x, y, u, hardcopy='frame%04d.png' %plotnr, show=False,
axis=[0, 1, 0, 1,- 1, 1])
# Make movie
movie('frame*.png', encoder='convert', output_file='movie.gif', fps=10)
cmd = 'animate movie.gif'
os.system(cmd)
| cc0-1.0 |
temasek/android_external_chromium_org | third_party/tlslite/tlslite/VerifierDB.py | 359 | 3104 | """Class for storing SRP password verifiers."""
from utils.cryptomath import *
from utils.compat import *
import mathtls
from BaseDB import BaseDB
class VerifierDB(BaseDB):
"""This class represent an in-memory or on-disk database of SRP
password verifiers.
A VerifierDB can be passed to a server handshake to authenticate
a client based on one of the verifiers.
This class is thread-safe.
"""
def __init__(self, filename=None):
"""Create a new VerifierDB instance.
@type filename: str
@param filename: Filename for an on-disk database, or None for
an in-memory database. If the filename already exists, follow
this with a call to open(). To create a new on-disk database,
follow this with a call to create().
"""
BaseDB.__init__(self, filename, "verifier")
def _getItem(self, username, valueStr):
(N, g, salt, verifier) = valueStr.split(" ")
N = base64ToNumber(N)
g = base64ToNumber(g)
salt = base64ToString(salt)
verifier = base64ToNumber(verifier)
return (N, g, salt, verifier)
def __setitem__(self, username, verifierEntry):
"""Add a verifier entry to the database.
@type username: str
@param username: The username to associate the verifier with.
Must be less than 256 characters in length. Must not already
be in the database.
@type verifierEntry: tuple
@param verifierEntry: The verifier entry to add. Use
L{tlslite.VerifierDB.VerifierDB.makeVerifier} to create a
verifier entry.
"""
BaseDB.__setitem__(self, username, verifierEntry)
def _setItem(self, username, value):
if len(username)>=256:
raise ValueError("username too long")
N, g, salt, verifier = value
N = numberToBase64(N)
g = numberToBase64(g)
salt = stringToBase64(salt)
verifier = numberToBase64(verifier)
valueStr = " ".join( (N, g, salt, verifier) )
return valueStr
def _checkItem(self, value, username, param):
(N, g, salt, verifier) = value
x = mathtls.makeX(salt, username, param)
v = powMod(g, x, N)
return (verifier == v)
def makeVerifier(username, password, bits):
"""Create a verifier entry which can be stored in a VerifierDB.
@type username: str
@param username: The username for this verifier. Must be less
than 256 characters in length.
@type password: str
@param password: The password for this verifier.
@type bits: int
@param bits: This values specifies which SRP group parameters
to use. It must be one of (1024, 1536, 2048, 3072, 4096, 6144,
8192). Larger values are more secure but slower. 2048 is a
good compromise between safety and speed.
@rtype: tuple
@return: A tuple which may be stored in a VerifierDB.
"""
return mathtls.makeVerifier(username, password, bits)
    makeVerifier = staticmethod(makeVerifier)
| bsd-3-clause |