| repo_name
				 stringlengths 5 100 | path
				 stringlengths 4 299 | copies
				 stringclasses 990
				values | size
				 stringlengths 4 7 | content
				 stringlengths 666 1.03M | license
				 stringclasses 15
				values | hash
				 int64 -9,223,351,895,964,839,000 9,223,297,778B | line_mean
				 float64 3.17 100 | line_max
				 int64 7 1k | alpha_frac
				 float64 0.25 0.98 | autogenerated
				 bool 1
				class | 
|---|---|---|---|---|---|---|---|---|---|---|
| 
	googlecartographer/cartographer | 
	docs/source/conf.py | 
	5 | 
	9092 | 
	# -*- coding: utf-8 -*-
# Copyright 2016 The Cartographer Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Sphinx build configuration for the Cartographer documentation.

Originally generated by sphinx-quickstart on Fri Jul 8 10:41:33 2016.
Sphinx execfile()s this module with the current directory set to its
containing dir; only values that differ from the Sphinx defaults are
set here.
"""

import sys
import os
from datetime import datetime

# -- General configuration -------------------------------------------------

# Sphinx extension modules enabled for this documentation set.
extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.mathjax',
]

# Paths, relative to this directory, that contain templates.
templates_path = ['_templates']

# Filename suffix of documentation source files.
source_suffix = '.rst'

# Document containing the root toctree.
master_doc = 'index'

# Project identity; the copyright year tracks the build date.
project = u'Cartographer'
copyright = u'{year} The Cartographer Authors'.format(year=datetime.now().year)

# Patterns, relative to the source directory, matching files and
# directories to skip when looking for sources.
exclude_patterns = []

# Do not render sectionauthor and moduleauthor directives.
show_authors = False

# Pygments style used for syntax highlighting.
pygments_style = 'sphinx'

# -- Options for HTML output -----------------------------------------------

# Built-in theme used for HTML and HTML Help pages.
html_theme = 'default'

# Extra static files (e.g. style sheets), copied after the built-in
# static files.
html_static_path = []

# Output file base name for the HTML help builder.
htmlhelp_basename = 'Cartographerdoc'

# -- Options for LaTeX output ----------------------------------------------

# Overrides for paper size, point size and preamble; defaults kept.
latex_elements = {
}

# One tuple per LaTeX document:
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Cartographer.tex', u'Cartographer Documentation',
     u'The Cartographer Authors', 'manual'),
]

# -- Options for manual page output ----------------------------------------

# One entry per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'cartographer', u'Cartographer Documentation',
     [u'The Cartographer Authors'], 1)
]

# -- Options for Texinfo output ---------------------------------------------

# One tuple per Texinfo document:
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    ('index', 'Cartographer', u'Cartographer Documentation',
     u'The Cartographer Authors', 'Cartographer',
     'Cartographer is a system that provides real-time simultaneous '
     'localization and mapping (SLAM) in 2D and 3D across multiple platforms '
     'and sensor configurations.', 'Miscellaneous'),
]
 | 
	apache-2.0 | 7,089,863,709,540,181,000 | 32.061818 | 79 | 0.711835 | false | 
| 
	partofthething/home-assistant | 
	tests/components/apple_tv/conftest.py | 
	8 | 
	3388 | 
	"""Fixtures for component."""
from unittest.mock import patch
from pyatv import conf, net
import pytest
from .common import MockPairingHandler, create_conf
@pytest.fixture(autouse=True, name="mock_scan")
def mock_scan_fixture():
    """Patch the config flow's pyatv scan with a controllable fake."""
    with patch("homeassistant.components.apple_tv.config_flow.scan") as scanner:

        async def fake_scan(loop, timeout=5, identifier=None, protocol=None, hosts=None):
            # Remember the hosts filter from the first call that supplies one.
            if not scanner.hosts:
                scanner.hosts = hosts
            return scanner.result

        scanner.result = []
        scanner.hosts = None
        scanner.side_effect = fake_scan
        yield scanner
@pytest.fixture(name="dmap_pin")
def dmap_pin_fixture():
    """Patch randrange so DMAP pairing always yields the PIN 1111."""
    with patch("homeassistant.components.apple_tv.config_flow.randrange") as fake_randrange:

        def fixed_pin(start, stop):
            return 1111

        fake_randrange.side_effect = fixed_pin
        yield fake_randrange
@pytest.fixture
def pairing():
    """Patch pyatv pair() to hand back a MockPairingHandler."""
    with patch("homeassistant.components.apple_tv.config_flow.pair") as fake_pair:

        async def start_pairing(config, protocol, loop, session=None, **kwargs):
            handler = MockPairingHandler(
                await net.create_session(session), config.get_service(protocol)
            )
            # Propagate the fixture's failure toggle to the handler.
            handler.always_fail = fake_pair.always_fail
            return handler

        fake_pair.always_fail = False
        fake_pair.side_effect = start_pairing
        yield fake_pair
@pytest.fixture
def pairing_mock():
    """Patch pyatv pair() with a mock handler that is its own return value."""
    with patch("homeassistant.components.apple_tv.config_flow.pair") as fake_pair:

        async def start_pairing(config, protocol, loop, session=None, **kwargs):
            return fake_pair

        async def do_nothing():
            pass

        # begin()/close() resolve to no-op coroutines; pin() accepts anything.
        fake_pair.begin.side_effect = do_nothing
        fake_pair.close.side_effect = do_nothing
        fake_pair.pin = lambda pin: None
        fake_pair.side_effect = start_pairing
        yield fake_pair
@pytest.fixture
def full_device(mock_scan, dmap_pin):
    """Add a device exposing MRP, DMAP and AirPlay services to the scan result."""
    device = create_conf(
        "127.0.0.1",
        "MRP Device",
        conf.MrpService("mrpid", 5555),
        conf.DmapService("dmapid", None, port=6666),
        conf.AirPlayService("airplayid", port=7777),
    )
    mock_scan.result.append(device)
    yield mock_scan
@pytest.fixture
def mrp_device(mock_scan):
    """Add a device exposing only an MRP service to the scan result."""
    service = conf.MrpService("mrpid", 5555)
    mock_scan.result.append(create_conf("127.0.0.1", "MRP Device", service))
    yield mock_scan
@pytest.fixture
def dmap_device(mock_scan):
    """Add a DMAP device without credentials to the scan result."""
    device = create_conf(
        "127.0.0.1",
        "DMAP Device",
        conf.DmapService("dmapid", None, port=6666),
    )
    mock_scan.result.append(device)
    yield mock_scan
@pytest.fixture
def dmap_device_with_credentials(mock_scan):
    """Add a DMAP device that already holds credentials to the scan result."""
    device = create_conf(
        "127.0.0.1",
        "DMAP Device",
        conf.DmapService("dmapid", "dummy_creds", port=6666),
    )
    mock_scan.result.append(device)
    yield mock_scan
@pytest.fixture
def airplay_device(mock_scan):
    """Add a device exposing only an AirPlay service to the scan result."""
    service = conf.AirPlayService("airplayid", port=7777)
    mock_scan.result.append(create_conf("127.0.0.1", "AirPlay Device", service))
    yield mock_scan
 | 
	mit | 8,095,179,028,966,757,000 | 24.862595 | 86 | 0.597107 | false | 
| 
	fentas/phantomjs | 
	src/qt/qtwebkit/Tools/Scripts/webkitpy/port/xvfbdriver_unittest.py | 
	118 | 
	7503 | 
	# Copyright (C) 2012 Zan Dobersek <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#    * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#    * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.port import Port
from webkitpy.port.server_process_mock import MockServerProcess
from webkitpy.port.xvfbdriver import XvfbDriver
from webkitpy.tool.mocktool import MockOptions
_log = logging.getLogger(__name__)
class XvfbDriverTest(unittest.TestCase):
    """Tests for XvfbDriver: startup, free-display allocation and shutdown.

    All system interaction (process launch, `ps` output, filesystem) is
    mocked, so the expected-log strings below pin the exact commands the
    driver is supposed to run.
    """

    def make_driver(self, worker_number=0, xorg_running=False, executive=None):
        """Build an XvfbDriver wired to a fully mocked Port.

        When `xorg_running` is true a fake 'Xorg' pid is registered so the
        driver believes an X server already occupies a display.  Passing
        `executive` lets a test control the mocked `ps` output.
        """
        port = Port(MockSystemHost(log_executive=True, executive=executive), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
        port._config.build_directory = lambda configuration: "/mock-build"
        port._server_process_constructor = MockServerProcess
        if xorg_running:
            port._executive._running_pids['Xorg'] = 108
        driver = XvfbDriver(port, worker_number=worker_number, pixel_tests=True)
        # Unit tests should not sleep waiting for the (fake) Xvfb to come up.
        driver._startup_delay_secs = 0
        return driver
    def cleanup_driver(self, driver):
        """Detach the fake Xvfb process so driver deletion does not stop it."""
        # Setting _xvfb_process member to None is necessary as the Driver object is stopped on deletion,
        # killing the Xvfb process if present. Thus, this method should only be called from tests that do not
        # intend to test the behavior of XvfbDriver.stop.
        driver._xvfb_process = None
    def assertDriverStartSuccessful(self, driver, expected_logs, expected_display, pixel_tests=False):
        """Start the driver and verify logs, server process and DISPLAY."""
        OutputCapture().assert_outputs(self, driver.start, [pixel_tests, []], expected_logs=expected_logs)
        self.assertTrue(driver._server_process.started)
        self.assertEqual(driver._server_process.env["DISPLAY"], expected_display)
    def test_start_no_pixel_tests(self):
        """Starting without pixel tests still launches Xvfb on display :0."""
        driver = self.make_driver()
        expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0")
        self.cleanup_driver(driver)
    def test_start_pixel_tests(self):
        """Starting with pixel tests launches the same Xvfb command line."""
        driver = self.make_driver()
        expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
        self.cleanup_driver(driver)
    def test_start_arbitrary_worker_number(self):
        """The worker number does not affect the display chosen at startup."""
        driver = self.make_driver(worker_number=17)
        expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
        self.cleanup_driver(driver)
    def test_next_free_display(self):
        """_next_free_display() picks the lowest display not in `ps` output."""
        # Displays :0 (Xorg) and :1 (Xvfb) taken -> next free is :2.
        output = "Xorg            /usr/bin/X :0 -auth /var/run/lightdm/root/:0 -nolisten tcp vt7 -novtswitch -background none\nXvfb            Xvfb :1 -screen 0 800x600x24 -nolisten tcp"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 2)
        self.cleanup_driver(driver)
        # Only :0 taken by a plain X server -> next free is :1.
        output = "X               /usr/bin/X :0 vt7 -nolisten tcp -auth /var/run/xauth/A:0-8p7Ybb"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 1)
        self.cleanup_driver(driver)
        # Only :0 taken by another Xvfb -> next free is :1.
        output = "Xvfb            Xvfb :0 -screen 0 800x600x24 -nolisten tcp"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 1)
        self.cleanup_driver(driver)
        # :0, :1 and :3 taken -> the gap at :2 is used first.
        output = "Xvfb            Xvfb :1 -screen 0 800x600x24 -nolisten tcp\nXvfb            Xvfb :0 -screen 0 800x600x24 -nolisten tcp\nXvfb            Xvfb :3 -screen 0 800x600x24 -nolisten tcp"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 2)
        self.cleanup_driver(driver)
    def test_start_next_worker(self):
        """start() launches Xvfb on whatever display _next_free_display reports."""
        driver = self.make_driver()
        driver._next_free_display = lambda: 0
        expected_logs = "MOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
        self.cleanup_driver(driver)
        driver = self.make_driver()
        driver._next_free_display = lambda: 3
        expected_logs = "MOCK popen: ['Xvfb', ':3', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":3", pixel_tests=True)
        self.cleanup_driver(driver)
    def test_stop(self):
        """stop() kills the Xvfb process, clears it and removes the lock file."""
        filesystem = MockFileSystem(files={'/tmp/.X42-lock': '1234\n'})
        port = Port(MockSystemHost(log_executive=True, filesystem=filesystem), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
        port._executive.kill_process = lambda x: _log.info("MOCK kill_process pid: " + str(x))
        driver = XvfbDriver(port, worker_number=0, pixel_tests=True)
        # Stand-in for the popen handle the driver would normally hold.
        class FakeXvfbProcess(object):
            pid = 1234
        driver._xvfb_process = FakeXvfbProcess()
        driver._lock_file = '/tmp/.X42-lock'
        expected_logs = "MOCK kill_process pid: 1234\n"
        OutputCapture().assert_outputs(self, driver.stop, [], expected_logs=expected_logs)
        self.assertIsNone(driver._xvfb_process)
        self.assertFalse(port._filesystem.exists(driver._lock_file))
 | 
	bsd-3-clause | 7,573,119,017,516,415,000 | 54.577778 | 197 | 0.698121 | false | 
| 
	valkjsaaa/sl4a | 
	python/src/Lib/contextlib.py | 
	62 | 
	4136 | 
	"""Utilities for with-statement contexts.  See PEP 343."""
import sys
from functools import wraps
__all__ = ["contextmanager", "nested", "closing"]
class GeneratorContextManager(object):
    """Helper for @contextmanager decorator.

    Wraps a just-started generator and drives it through the context
    manager protocol: the value yielded by the generator becomes the
    result of __enter__, and __exit__ resumes (or throws into) the
    generator so its cleanup code runs.  (Python 2 code: uses gen.next()
    and comma-style except clauses.)
    """
    def __init__(self, gen):
        # `gen` is the generator object produced by calling the
        # decorated function; it has not been advanced yet.
        self.gen = gen
    def __enter__(self):
        """Advance to the generator's yield and return the yielded value."""
        try:
            return self.gen.next()
        except StopIteration:
            # The generator finished without yielding even once.
            raise RuntimeError("generator didn't yield")
    def __exit__(self, type, value, traceback):
        """Finish the generator; return True to suppress the exception."""
        if type is None:
            # Normal exit from the with-block: resume the generator and
            # require it to stop (run its cleanup and return).
            try:
                self.gen.next()
            except StopIteration:
                return
            else:
                raise RuntimeError("generator didn't stop")
        else:
            if value is None:
                # Need to force instantiation so we can reliably
                # tell if we get the same exception back
                value = type()
            try:
                self.gen.throw(type, value, traceback)
                raise RuntimeError("generator didn't stop after throw()")
            except StopIteration, exc:
                # Suppress the exception *unless* it's the same exception that
                # was passed to throw().  This prevents a StopIteration
                # raised inside the "with" statement from being suppressed
                return exc is not value
            except:
                # only re-raise if it's *not* the exception that was
                # passed to throw(), because __exit__() must not raise
                # an exception unless __exit__() itself failed.  But throw()
                # has to raise the exception to signal propagation, so this
                # fixes the impedance mismatch between the throw() protocol
                # and the __exit__() protocol.
                #
                if sys.exc_info()[1] is not value:
                    raise
def contextmanager(func):
    """@contextmanager decorator.

    Turns a generator function into a factory of context managers.

    Typical usage:

        @contextmanager
        def some_generator(<arguments>):
            <setup>
            try:
                yield <value>
            finally:
                <cleanup>

    This makes this:

        with some_generator(<arguments>) as <variable>:
            <body>

    equivalent to this:

        <setup>
        try:
            <variable> = <value>
            <body>
        finally:
            <cleanup>
    """
    @wraps(func)
    def make_context_manager(*args, **kwds):
        gen = func(*args, **kwds)
        return GeneratorContextManager(gen)
    return make_context_manager
@contextmanager
def nested(*managers):
    """Support multiple context managers in a single with-statement.

    Code like this:

        with nested(A, B, C) as (X, Y, Z):
            <body>

    is equivalent to this:

        with A as X:
            with B as Y:
                with C as Z:
                    <body>

    (Python 2 code: note the three-expression raise at the end.)
    """
    # Exits are collected as managers are entered so that, on the way
    # out, they can be invoked in reverse (innermost-first) order.
    exits = []
    vars = []
    # `exc` holds the currently-propagating exception as an
    # (type, value, traceback) triple; all-None means "no exception".
    exc = (None, None, None)
    try:
        for mgr in managers:
            # Look up __exit__ before calling __enter__ so a manager
            # whose __enter__ fails is never exited.
            exit = mgr.__exit__
            enter = mgr.__enter__
            vars.append(enter())
            exits.append(exit)
        yield vars
    except:
        exc = sys.exc_info()
    finally:
        # Unwind every entered manager, innermost first.  An __exit__
        # returning true swallows the current exception; an __exit__
        # that raises replaces it.
        while exits:
            exit = exits.pop()
            try:
                if exit(*exc):
                    exc = (None, None, None)
            except:
                exc = sys.exc_info()
        if exc != (None, None, None):
            # Don't rely on sys.exc_info() still containing
            # the right information. Another exception may
            # have been raised and caught by an exit method
            raise exc[0], exc[1], exc[2]
class closing(object):
    """Context manager that calls close() on the wrapped object at exit.

    Code like this:

        with closing(<module>.open(<arguments>)) as f:
            <block>

    is equivalent to this:

        f = <module>.open(<arguments>)
        try:
            <block>
        finally:
            f.close()
    """

    def __init__(self, thing):
        self.thing = thing

    def __enter__(self):
        # Hand the wrapped object itself to the "as" target.
        return self.thing

    def __exit__(self, *exc_info):
        # Close unconditionally, whether or not the block raised.
        self.thing.close()
 | 
	apache-2.0 | -6,676,445,754,616,710,000 | 26.03268 | 78 | 0.508946 | false | 
| 
	AbrahmAB/sugar | 
	src/jarabe/controlpanel/gui.py | 
	2 | 
	21394 | 
	# Copyright (C) 2008 One Laptop Per Child
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import os
import logging
from gettext import gettext as _
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkX11
from sugar3.graphics.icon import Icon
from sugar3.graphics import style
from sugar3.graphics.alert import Alert, TimeoutAlert
from jarabe.model.session import get_session_manager
from jarabe.controlpanel.toolbar import MainToolbar
from jarabe.controlpanel.toolbar import SectionToolbar
from jarabe import config
from jarabe.model import shell
_logger = logging.getLogger('ControlPanel')
class ControlPanel(Gtk.Window):
    """Top-level Sugar control panel window.

    Shows a searchable grid of section icons; clicking one swaps in that
    section's view with accept/cancel toolbars.  Sections are discovered
    dynamically from the ``cpsection`` extension directory (see
    _get_options).  The window is modal and, when given a parent XID,
    transient for that X11 window.
    """
    __gtype_name__ = 'SugarControlPanel'
    def __init__(self, window_xid=0):
        # XID of the parent X11 window; 0 means "no parent".
        self.parent_window_xid = window_xid
        Gtk.Window.__init__(self)
        self._calculate_max_columns()
        self.set_border_width(style.LINE_WIDTH)
        self.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
        self.set_decorated(False)
        self.set_resizable(False)
        self.set_modal(True)
        self.set_can_focus(True)
        self.connect('key-press-event', self.__key_press_event_cb)
        self._toolbar = None
        self._canvas = None
        self._table = None
        self._scrolledwindow = None
        self._separator = None
        self._section_view = None
        self._section_toolbar = None
        self._main_toolbar = None
        # Layout: vbox holds (toolbar, separator, hbox(main_view)).
        self._vbox = Gtk.VBox()
        self._hbox = Gtk.HBox()
        self._vbox.pack_start(self._hbox, True, True, 0)
        self._hbox.show()
        self._main_view = Gtk.EventBox()
        self._hbox.pack_start(self._main_view, True, True, 0)
        self._main_view.modify_bg(Gtk.StateType.NORMAL,
                                  style.COLOR_BLACK.get_gdk_color())
        self._main_view.show()
        self.add(self._vbox)
        self._vbox.show()
        self.connect('realize', self.__realize_cb)
        self._options = self._get_options()
        self._current_option = None
        self._setup_main()
        self._setup_section()
        self._show_main_view()
        # Re-layout the icon grid whenever the screen geometry changes.
        Gdk.Screen.get_default().connect(
            'size-changed', self.__size_changed_cb)
        # Nesting counter for busy()/unbusy() cursor handling.
        self._busy_count = 0
        # Options currently matching the search query (see _update).
        self._selected = []
    def __realize_cb(self, widget):
        """Mark the window as a dialog and attach it to its parent XID."""
        self.set_type_hint(Gdk.WindowTypeHint.DIALOG)
        window = self.get_window()
        window.set_accept_focus(True)
        if self.parent_window_xid > 0:
            display = Gdk.Display.get_default()
            parent = GdkX11.X11Window.foreign_new_for_display(
                display, self.parent_window_xid)
            window.set_transient_for(parent)
        # the modal windows counter is updated to disable hot keys - SL#4601
        shell.get_model().push_modal()
    def __size_changed_cb(self, event):
        self._calculate_max_columns()
    def busy(self):
        """Show a watch cursor; calls nest with unbusy()."""
        if self._busy_count == 0:
            self._old_cursor = self.get_window().get_cursor()
            self._set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
        self._busy_count += 1
    def unbusy(self):
        """Undo one busy() call; restores the cursor on the last one."""
        self._busy_count -= 1
        if self._busy_count == 0:
            self._set_cursor(self._old_cursor)
    def _set_cursor(self, cursor):
        self.get_window().set_cursor(cursor)
        Gdk.flush()
    def add_alert(self, alert):
        """Insert an alert widget below the toolbar and separator."""
        self._vbox.pack_start(alert, False, False, 0)
        self._vbox.reorder_child(alert, 2)
    def remove_alert(self, alert):
        self._vbox.remove(alert)
    def grab_focus(self):
        # overwrite grab focus in order to grab focus on the view
        self._main_view.get_child().grab_focus()
    def _calculate_max_columns(self):
        """Size the window to the screen and recompute the grid width."""
        self._max_columns = int(0.285 * (float(Gdk.Screen.width()) /
                                         style.GRID_CELL_SIZE - 3))
        offset = style.GRID_CELL_SIZE
        width = Gdk.Screen.width() - offset * 2
        height = Gdk.Screen.height() - offset * 2
        self.set_size_request(width, height)
        # During __init__ this runs before _table is assigned, so the
        # hasattr check skips the re-population on the first call.
        if hasattr(self, '_table'):
            for child in self._table.get_children():
                child.destroy()
            self._setup_options()
    def _set_canvas(self, canvas):
        """Swap the widget shown in the main view area."""
        if self._canvas in self._main_view:
            self._main_view.remove(self._canvas)
        if canvas:
            self._main_view.add(canvas)
        self._canvas = canvas
    def _set_toolbar(self, toolbar):
        """Swap the toolbar at the top; lazily add the separator once."""
        if self._toolbar:
            self._vbox.remove(self._toolbar)
        self._vbox.pack_start(toolbar, False, False, 0)
        self._vbox.reorder_child(toolbar, 0)
        self._toolbar = toolbar
        if not self._separator:
            self._separator = Gtk.HSeparator()
            self._vbox.pack_start(self._separator, False, False, 0)
            self._vbox.reorder_child(self._separator, 1)
            self._separator.show()
    def _setup_main(self):
        """Build the main (grid) view: toolbar + scrolled icon table."""
        self._main_toolbar = MainToolbar()
        self._table = Gtk.Table()
        self._table.set_col_spacings(style.GRID_CELL_SIZE)
        self._table.set_row_spacings(style.GRID_CELL_SIZE)
        self._table.set_border_width(style.GRID_CELL_SIZE)
        self._scrolledwindow = Gtk.ScrolledWindow()
        self._scrolledwindow.set_can_focus(False)
        self._scrolledwindow.set_policy(Gtk.PolicyType.AUTOMATIC,
                                        Gtk.PolicyType.AUTOMATIC)
        self._scrolledwindow.add_with_viewport(self._table)
        child = self._scrolledwindow.get_child()
        child.modify_bg(
            Gtk.StateType.NORMAL, style.COLOR_BLACK.get_gdk_color())
        self._setup_options()
        self._main_toolbar.connect('stop-clicked',
                                   self.__stop_clicked_cb)
        self._main_toolbar.connect('search-changed',
                                   self.__search_changed_cb)
    def _setup_options(self):
        """Place one clickable icon per section into the table."""
        # If the screen width only supports two columns, start
        # placing from the second row.
        if self._max_columns == 2:
            row = 1
            column = 0
        else:
            # About Me and About my computer are hardcoded below to use the
            # first two slots so we need to leave them free.
            row = 0
            column = 2
        # NOTE(review): list-style .sort() on dict .keys() only works on
        # Python 2; on Python 3 this would raise AttributeError.
        options = self._options.keys()
        options.sort()
        for option in options:
            sectionicon = _SectionIcon(icon_name=self._options[option]['icon'],
                                       title=self._options[option]['title'],
                                       xo_color=self._options[option]['color'],
                                       pixel_size=style.GRID_CELL_SIZE)
            sectionicon.connect('button_press_event',
                                self.__select_option_cb, option)
            sectionicon.show()
            if option == 'aboutme':
                self._table.attach(sectionicon, 0, 1, 0, 1)
            elif option == 'aboutcomputer':
                self._table.attach(sectionicon, 1, 2, 0, 1)
            else:
                self._table.attach(sectionicon,
                                   column, column + 1,
                                   row, row + 1)
                column += 1
                if column == self._max_columns:
                    column = 0
                    row += 1
            self._options[option]['button'] = sectionicon
    def _show_main_view(self):
        """Tear down any section view and show the icon grid again."""
        if self._section_view is not None:
            self._section_view.destroy()
            self._section_view = None
        self._set_toolbar(self._main_toolbar)
        self._main_toolbar.show()
        self._set_canvas(self._scrolledwindow)
        self._main_view.modify_bg(Gtk.StateType.NORMAL,
                                  style.COLOR_BLACK.get_gdk_color())
        self._table.show()
        self._scrolledwindow.show()
        entry = self._main_toolbar.get_entry()
        entry.set_text('')
        entry.connect('icon-press', self.__clear_icon_pressed_cb)
        self.grab_focus()
    def __key_press_event_cb(self, window, event):
        """Keyboard handling: Return opens the single match, Escape closes;
        any other key is routed into the search entry."""
        if event.keyval == Gdk.KEY_Return:
            if len(self._selected) == 1:
                self.show_section_view(self._selected[0])
                return True
        if event.keyval == Gdk.KEY_Escape:
            if self._toolbar == self._main_toolbar:
                self.__stop_clicked_cb(None)
                self.destroy()
            else:
                self.__cancel_clicked_cb(None)
            return True
        # if the user clicked out of the window - fix SL #3188
        if not self.is_active():
            self.present()
        entry = self._main_toolbar.get_entry()
        if not entry.has_focus():
            entry.grab_focus()
        return False
    def __clear_icon_pressed_cb(self, entry, icon_pos, event):
        self.grab_focus()
    def _update(self, query):
        """Filter section icons by keyword match against *query*."""
        self._selected = []
        for option in self._options:
            found = False
            for key in self._options[option]['keywords']:
                if query.lower() in key.lower():
                    self._options[option]['button'].set_sensitive(True)
                    self._selected.append(option)
                    found = True
                    break
            if not found:
                self._options[option]['button'].set_sensitive(False)
    def _setup_section(self):
        """Create the accept/cancel toolbar used inside a section."""
        self._section_toolbar = SectionToolbar()
        self._section_toolbar.connect('cancel-clicked',
                                      self.__cancel_clicked_cb)
        self._section_toolbar.connect('accept-clicked',
                                      self.__accept_clicked_cb)
    def show_section_view(self, option):
        """Import and display the view for the given section *option*."""
        self._set_toolbar(self._section_toolbar)
        icon = self._section_toolbar.get_icon()
        icon.set_from_icon_name(self._options[option]['icon'],
                                Gtk.IconSize.LARGE_TOOLBAR)
        icon.props.xo_color = self._options[option]['color']
        title = self._section_toolbar.get_title()
        title.set_text(self._options[option]['title'])
        self._section_toolbar.show()
        self._current_option = option
        # Sections live in cpsection.<option>.view / cpsection.<option>.model.
        mod = __import__('.'.join(('cpsection', option, 'view')),
                         globals(), locals(), ['view'])
        view_class = getattr(mod, self._options[option]['view'], None)
        mod = __import__('.'.join(('cpsection', option, 'model')),
                         globals(), locals(), ['model'])
        model = ModelWrapper(mod)
        try:
            self.busy()
            self._section_view = view_class(model,
                                            self._options[option]['alerts'])
            self._set_canvas(self._section_view)
            self._section_view.show()
        finally:
            self.unbusy()
        self._section_view.connect('notify::is-valid',
                                   self.__valid_section_cb)
        self._section_view.connect('notify::is-cancellable',
                                   self.__cancellable_section_cb)
        self._section_view.connect('request-close',
                                   self.__close_request_cb)
        self._section_view.connect('add-alert',
                                   self.__create_restart_alert_cb)
        self._section_view.connect('set-toolbar-sensitivity',
                                   self.__set_toolbar_sensitivity_cb)
        self._main_view.modify_bg(Gtk.StateType.NORMAL,
                                  style.COLOR_WHITE.get_gdk_color())
    def set_section_view_auto_close(self):
        """Automatically close the control panel if there is "nothing to do"
        """
        self._section_view.auto_close = True
    def _get_options(self):
        """Get the available option information from the extensions
        """
        options = {}
        path = os.path.join(config.ext_path, 'cpsection')
        folder = os.listdir(path)
        for item in folder:
            # Every package under cpsection/ with an __init__.py that
            # declares CLASS becomes a section.
            if os.path.isdir(os.path.join(path, item)) and \
                    os.path.exists(os.path.join(path, item, '__init__.py')):
                try:
                    mod = __import__('.'.join(('cpsection', item)),
                                     globals(), locals(), [item])
                    view_class = getattr(mod, 'CLASS', None)
                    if view_class is not None:
                        options[item] = {}
                        options[item]['alerts'] = []
                        options[item]['view'] = view_class
                        options[item]['icon'] = getattr(mod, 'ICON', item)
                        options[item]['title'] = getattr(mod, 'TITLE', item)
                        options[item]['color'] = getattr(mod, 'COLOR', None)
                        keywords = getattr(mod, 'KEYWORDS', [])
                        keywords.append(options[item]['title'].lower())
                        if item not in keywords:
                            keywords.append(item)
                        options[item]['keywords'] = keywords
                    else:
                        _logger.debug('no CLASS attribute in %r', item)
                except Exception:
                    # A broken extension must not take the panel down.
                    logging.exception('Exception while loading extension:')
        return options
    def __cancel_clicked_cb(self, widget):
        """Revert the section's changes and return to the grid."""
        self._section_view.undo()
        self._options[self._current_option]['alerts'] = []
        self._section_toolbar.accept_button.set_sensitive(True)
        self._show_main_view()
    def __accept_clicked_cb(self, widget):
        """Apply the section's changes; prompt for restart if required."""
        if hasattr(self._section_view, "apply"):
            self._section_view.apply()
        if self._section_view.needs_restart:
            self.__set_toolbar_sensitivity_cb(False)
            if self._section_view.show_restart_alert:
                self.__create_restart_alert_cb()
        else:
            self._show_main_view()
    def __set_toolbar_sensitivity_cb(self, value=True,
                                     widget=None, event=None):
        self._section_toolbar.accept_button.set_sensitive(value)
        self._section_toolbar.cancel_button.set_sensitive(value)
    def __create_restart_alert_cb(self, widget=None, event=None):
        """Show the 'restart required' alert with the buttons the section
        allows (cancel / later / restart now)."""
        alert = Alert()
        alert.props.title = _('Warning')
        alert.props.msg = self._section_view.restart_msg
        if self._section_view.props.is_cancellable:
            icon = Icon(icon_name='dialog-cancel')
            alert.add_button(Gtk.ResponseType.CANCEL,
                             _('Cancel changes'), icon)
            icon.show()
        if self._section_view.props.is_deferrable:
            icon = Icon(icon_name='dialog-ok')
            alert.add_button(Gtk.ResponseType.ACCEPT, _('Later'), icon)
            icon.show()
        icon = Icon(icon_name='system-restart')
        alert.add_button(Gtk.ResponseType.APPLY, _('Restart now'), icon)
        icon.show()
        self.add_alert(alert)
        alert.connect('response', self.__response_cb)
        alert.show()
    def __response_cb(self, alert, response_id):
        """Handle the restart alert: undo, defer, or log out to restart."""
        self.remove_alert(alert)
        self._section_toolbar.accept_button.set_sensitive(True)
        self._section_toolbar.cancel_button.set_sensitive(True)
        if response_id is Gtk.ResponseType.CANCEL:
            self._section_view.undo()
            self._section_view.setup()
            self._options[self._current_option]['alerts'] = []
        elif response_id is Gtk.ResponseType.ACCEPT:
            self._options[self._current_option]['alerts'] = \
                self._section_view.restart_alerts
            self._show_main_view()
        elif response_id is Gtk.ResponseType.APPLY:
            self.busy()
            self._section_toolbar.accept_button.set_sensitive(False)
            self._section_toolbar.cancel_button.set_sensitive(False)
            get_session_manager().logout()
            # If logout stalls (an activity not responding), offer a
            # forced shutdown after a grace period.
            GObject.timeout_add_seconds(4, self.__quit_timeout_cb)
    def __quit_timeout_cb(self):
        self.unbusy()
        alert = TimeoutAlert(30)
        alert.props.title = _('An activity is not responding.')
        alert.props.msg = _('You may lose unsaved work if you continue.')
        alert.connect('response', self.__quit_accept_cb)
        self.add_alert(alert)
        alert.show()
    def __quit_accept_cb(self, alert, response_id):
        self.remove_alert(alert)
        if response_id is Gtk.ResponseType.CANCEL:
            get_session_manager().cancel_shutdown()
            self._section_toolbar.accept_button.set_sensitive(True)
            self._section_toolbar.cancel_button.set_sensitive(True)
        else:
            self.busy()
            get_session_manager().shutdown_completed()
    def __select_option_cb(self, button, event, option):
        self.show_section_view(option)
    def __search_changed_cb(self, maintoolbar, query):
        self._update(query)
    def __stop_clicked_cb(self, widget):
        # Pop the modal counter pushed in __realize_cb (SL#4601).
        shell.get_model().pop_modal()
        self.destroy()
    def __close_request_cb(self, widget, event=None):
        self.destroy()
    def __valid_section_cb(self, section_view, pspec):
        section_is_valid = section_view.props.is_valid
        self._section_toolbar.accept_button.set_sensitive(section_is_valid)
    def __cancellable_section_cb(self, section_view, pspec):
        cancellable = section_view.props.is_cancellable
        self._section_toolbar.cancel_button.set_sensitive(cancellable)
class ModelWrapper(object):
    """Proxy around a control-panel section's model module.

    On construction it snapshots the value returned by every ``get_<name>``
    accessor of the module (except ``get_color``) so that :meth:`undo` can
    later push those original values back through the matching
    ``set_<name>`` setters.  Any other attribute access is delegated to the
    wrapped module.
    """
    def __init__(self, module):
        self._module = module
        self._options = {}
        self._setup()
    def _setup(self):
        # Snapshot the current value of every getter; a getter that raises
        # is recorded as None so undo() will simply skip that option.
        for attr in dir(self._module):
            if not attr.startswith('get_'):
                continue
            option = attr[4:]
            if option == 'color':
                continue
            try:
                value = getattr(self._module, attr)()
            except Exception:
                value = None
            self._options[option] = value
    def __getattr__(self, name):
        # Anything not defined on the wrapper resolves on the module.
        return getattr(self._module, name)
    def undo(self):
        """Restore every snapshotted option through its setter, if any."""
        for option, value in self._options.items():
            setter = getattr(self._module, 'set_' + option, None)
            if setter and value is not None:
                try:
                    setter(value)
                except Exception as detail:
                    _logger.debug('Error undo option: %s', detail)
# Give the window a CSS name when the GTK+ version supports it
# (Gtk.Widget.set_css_name appeared in 3.20) — presumably so themes can
# target the control panel; hasattr keeps older GTK+ working.
if hasattr(ControlPanel, 'set_css_name'):
    ControlPanel.set_css_name('controlpanel')
class _SectionIcon(Gtk.EventBox):
    """Clickable icon-plus-label tile shown in the control panel grid."""
    __gtype_name__ = 'SugarSectionIcon'
    __gproperties__ = {
        'icon-name': (str, None, None, None, GObject.PARAM_READWRITE),
        'pixel-size': (object, None, None, GObject.PARAM_READWRITE),
        'xo-color': (object, None, None, GObject.PARAM_READWRITE),
        'title': (str, None, None, None, GObject.PARAM_READWRITE),
    }
    # GObject property name -> backing instance attribute.
    _PROPERTY_ATTRS = {
        'icon-name': '_icon_name',
        'pixel-size': '_pixel_size',
        'xo-color': '_xo_color',
        'title': '_title',
    }
    def __init__(self, **kwargs):
        # Defaults must exist before Gtk.EventBox.__init__ applies any
        # property keyword arguments through do_set_property.
        self._icon_name = None
        self._pixel_size = style.GRID_CELL_SIZE
        self._xo_color = None
        self._title = 'No Title'
        Gtk.EventBox.__init__(self, **kwargs)
        self._vbox = Gtk.VBox()
        self._vbox.set_spacing(style.DEFAULT_SPACING)
        self._icon = Icon(icon_name=self._icon_name,
                          pixel_size=self._pixel_size,
                          xo_color=self._xo_color)
        self._vbox.pack_start(self._icon, expand=False, fill=False, padding=0)
        self._label = Gtk.Label(label=self._title)
        self._label.modify_fg(Gtk.StateType.NORMAL,
                              style.COLOR_WHITE.get_gdk_color())
        self._vbox.pack_start(self._label, expand=False, fill=False, padding=0)
        self.set_visible_window(False)
        self.set_app_paintable(True)
        self.set_events(Gdk.EventMask.BUTTON_PRESS_MASK)
        self.add(self._vbox)
        self._vbox.show()
        self._label.show()
        self._icon.show()
    def get_icon(self):
        """Return the inner Icon widget."""
        return self._icon
    def do_set_property(self, pspec, value):
        # Store the value only when it actually changed.
        attr = self._PROPERTY_ATTRS.get(pspec.name)
        if attr is not None and getattr(self, attr) != value:
            setattr(self, attr, value)
    def do_get_property(self, pspec):
        attr = self._PROPERTY_ATTRS.get(pspec.name)
        if attr is not None:
            return getattr(self, attr)
 | 
	gpl-3.0 | -4,915,852,698,902,648,000 | 36.271777 | 79 | 0.558708 | false | 
| 
	pegasus-isi/pegasus | 
	test/core/010-runtime-clustering/cluster.py | 
	1 | 
	8643 | 
	#!/usr/bin/env python3
import os
import argparse
import configparser
import logging
import sys
import logging
import subprocess
from datetime import datetime
from pathlib import Path
from Pegasus.api import *
logging.basicConfig(level=logging.DEBUG)
def parse_args(args=None):
    """Parse command-line arguments for the runtime clustering test.

    Args:
        args: argument list to parse; ``None`` means use ``sys.argv[1:]``
            as read at call time.  (The original default ``args=sys.argv[1:]``
            was evaluated once at import time, so later changes to
            ``sys.argv`` were silently ignored.)

    Returns:
        argparse.Namespace with ``pegasus_keg_path`` and ``config_dir``.
    """
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser(description="Runtime Cluster Test Workflow")
    parser.add_argument(
        "pegasus_keg_path",
        help="abs path to pegasus-keg install (e.g '/usr/bin/pegasus-keg')",
        metavar="PEGASUS_KEG_PATH",
    )
    parser.add_argument(
        "config_dir",
        help="name of test config dir (e.g. 'runtime-condorio', 'runtime-nonsharedfs'",
    )
    return parser.parse_args(args)
def write_sc(top_dir: Path, run_id: str, config: configparser.ConfigParser = None):
    """Write the site catalog for the test run.

    Defines four sites: ``cartman-data`` (gsiftp staging), ``condorpool``,
    ``sharedfs`` (NFS scratch/storage under /lizard), and ``local``.

    Args:
        top_dir: absolute directory the test runs from.
        run_id: timestamp-style id used to namespace shared directories.
        config: parsed test configuration; when ``None`` the module-level
            ``config`` set up by ``__main__`` is used (the original code
            read that global implicitly, which hid the dependency).
    """
    if config is None:
        # Backward compatible fallback to the module-level ``config``.
        config = globals()["config"]
    # get pegasus version
    cp = subprocess.run(
        ["pegasus-version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if cp.returncode != 0:
        raise RuntimeError(
            "unable to call pegasus-version: {}".format(cp.stderr.decode().strip())
        )
    REMOTE_PEGASUS_HOME = "/lizard/scratch-90-days/bamboo/installs/pegasus-{}".format(
        cp.stdout.decode().strip()
    )
    sc = SiteCatalog()
    # --- cartman-data site ----------------------------------------------------
    cartman_data = Site(name="cartman-data", arch=Arch.X86_64, os_type=OS.LINUX)
    cartman_data.add_directories(
        Directory(
            Directory.SHARED_SCRATCH,
            top_dir / "staging-site/scratch",
        ).add_file_servers(
            FileServer(
                "gsiftp://bamboo.isi.edu" + str(top_dir / "staging-site/scratch"),
                Operation.ALL,
            )
        )
    )
    cartman_data.add_env(PEGASUS_HOME=REMOTE_PEGASUS_HOME)
    sc.add_sites(cartman_data)
    # --- condorpool site ------------------------------------------------------
    condorpool = Site(name="condorpool", arch=Arch.X86_64, os_type=OS.LINUX)
    condorpool.add_condor_profile(universe="vanilla")
    condorpool.add_pegasus_profile(style="condor")
    sc.add_sites(condorpool)
    # --- sharedfs site --------------------------------------------------------
    sharedfs = Site(name="sharedfs", arch=Arch.X86_64, os_type=OS.LINUX)
    sharedfs_dir1 = Directory(
        Directory.SHARED_STORAGE,
        Path("/lizard/scratch-90-days")
        / os.getenv("USER")
        / "storage/black-diamond-output"
        / run_id,
    )
    sharedfs_dir1.add_file_servers(
        FileServer(
            "file://"
            + str(
                Path("/lizard/scratch-90-days")
                / os.getenv("USER")
                / "storage/black-diamond-output"
                / run_id
            ),
            Operation.ALL,
        )
    )
    sharedfs.add_directories(sharedfs_dir1)
    sharedfs_dir2 = Directory(
        Directory.SHARED_SCRATCH,
        Path("/lizard/scratch-90-days") / os.getenv("USER") / "scratch" / run_id,
    )
    sharedfs_dir2.add_file_servers(
        FileServer(
            "file://"
            + str(
                Path("/lizard/scratch-90-days") / os.getenv("USER") / "scratch" / run_id
            ),
            Operation.ALL,
        )
    )
    sharedfs.add_directories(sharedfs_dir2)
    sharedfs.add_env(PEGASUS_HOME=REMOTE_PEGASUS_HOME)
    sharedfs.add_condor_profile(
        should_transfer_files="Yes",
        universe="vanilla",
        when_to_transfer_output="ON_EXIT",
    )
    sharedfs.add_pegasus_profile(style="condor")
    sc.add_sites(sharedfs)
    # --- local site -----------------------------------------------------------
    local_site_url = config.get("all", "local_site_url", fallback="")
    local = Site(name="local", arch=Arch.X86_64, os_type=OS.LINUX)
    local_dir1 = Directory(Directory.SHARED_STORAGE, top_dir / "outputs")
    local_dir1.add_file_servers(
        FileServer(local_site_url + str(top_dir / "outputs"), Operation.ALL)
    )
    local.add_directories(local_dir1)
    local_dir2 = Directory(Directory.SHARED_SCRATCH, top_dir / "work")
    local_dir2.add_file_servers(
        FileServer(local_site_url + str(top_dir / "work"), Operation.ALL)
    )
    local.add_directories(local_dir2)
    sc.add_sites(local)
    # write
    sc.write()
def write_rc(config: configparser.ConfigParser):
    """Create the sample KEG input file and write the replica catalog for it."""
    configured = config.get("all", "input_file")
    if configured == "":
        # No base directory configured: drop f.a into the current directory.
        target = Path("f.a")
    else:
        # ``configured`` is a base directory such as '/lizard/scratch-90-days'.
        input_dir = Path(configured) / os.getenv("USER") / "inputs"
        input_dir.mkdir(parents=True, exist_ok=True)
        target = input_dir / "f.a"
    target.write_text("This is sample input to KEG")
    rc = ReplicaCatalog()
    rc.add_replica(
        site=config.get("all", "file_site"), lfn="f.a", pfn=target.resolve()
    )
    rc.write()
def write_tc(config: configparser.ConfigParser, pegasus_keg_path: str):
    """Write the transformation catalog: one stageable keg executable per level."""
    tc = TransformationCatalog()
    # Both levels share the same executable location; read config once.
    exec_site = config.get("all", "executable_site")
    exec_pfn = config.get("all", "executable_url") + pegasus_keg_path
    for level in (1, 2):
        transformation = Transformation(
            namespace="cluster",
            name="level{}".format(level),
            version="1.0",
            site=exec_site,
            pfn=exec_pfn,
            is_stageable=True,
            os_type=OS.LINUX,
            arch=Arch.X86_64,
        )
        # Horizontal clustering knobs come straight from the test config.
        transformation.add_pegasus_profile(
            clusters_size=config.get("all", "clusters_size"),
            clusters_max_runtime=config.get("all", "clusters_maxruntime"),
        )
        tc.add_transformations(transformation)
    tc.write()
if __name__ == "__main__":
    args = parse_args()
    TOP_DIR = Path().cwd().resolve()
    RUN_ID = datetime.now().strftime("%Y%m%d_%H%M%S")
    # --- validate test config dir ---------------------------------------------
    config_dir = Path(__file__).parent / args.config_dir
    if not config_dir.is_dir():
        raise ValueError(
            "config_dir: {} is not a directory or does not exist".format(config_dir)
        )
    config_file = config_dir / "test.config"
    if not config_file.is_file():
        # BUG FIX: the format string had two placeholders but was given a
        # single argument, so raising it crashed with IndexError instead
        # of the intended ValueError.
        raise ValueError(
            "{} does not contain required file: {}".format(config_dir, config_file)
        )
    # --- general test config --------------------------------------------------
    # The dict supplies defaults; test.config overrides them.
    config = configparser.ConfigParser(
        {
            "input_file": "",
            "workflow_name": "horizontal-clustering-test",
            "clusters_size": "3",
            "clusters_maxruntime": "7",
        }
    )
    config.read(str(config_file))
    # --- catalogs -------------------------------------------------------------
    write_sc(TOP_DIR, RUN_ID)
    write_rc(config)
    write_tc(config, args.pegasus_keg_path)
    # --- workflow -------------------------------------------------------------
    wf = Workflow(config.get("all", "workflow_name"))
    input_file = File("f.a")
    # create 4 lvl1 jobs
    for i in range(4):
        job = (
            Job(namespace="cluster", transformation="level1", version="1.0")
            .add_args("-a", "level1", "-T", i + 1, "-i", input_file)
            .add_inputs(input_file)
            .add_profiles(Namespace.PEGASUS, key="job.runtime", value=i + 1)
        )
        wf.add_jobs(job)
        # for each lvl1 job, create 4 lvl2 children
        for j in range(4):
            child = (
                Job(namespace="cluster", transformation="level2", version="1.0")
                .add_args("-a", "level2", "-T", ((j + 1) * 2))
                .add_profiles(Namespace.PEGASUS, key="runtime", value=((j + 1) * 2))
            )
            wf.add_jobs(child)
            wf.add_dependency(job=job, children=[child])
    # plan and run
    execution_site = config.get("all", "execution_site", fallback="local")
    staging_site = config.get("all", "staging_site", fallback="local")
    output_site = config.get("all", "output_site", fallback="local")
    top_pegasusrc = Path(__file__).parent / "pegasusrc"
    pegasusrc = config_dir / "pegasusrc"
    # include anything in __file__/pegasusrc in ./config_dir/pegasusrc
    with top_pegasusrc.open("r") as top_cfg, pegasusrc.open("a") as cfg:
        cfg.write(top_cfg.read())
    try:
        wf.plan(
            conf=str(pegasusrc),
            sites=[execution_site],
            staging_sites={execution_site: staging_site},
            output_sites=[output_site],
            dir="work/submit",
            cleanup="leaf",
            cluster=["horizontal"],
            verbose=3,
            submit=True,
        ).wait().analyze().statistics()
    except PegasusClientError as e:
        # Surface the planner's stdout so the CI log shows the real cause.
        print(e)
        print(e.result.stdout)
 | 
	apache-2.0 | -8,122,236,989,658,102,000 | 31.011111 | 88 | 0.548652 | false | 
| 
	bramd/django-phonenumber-field | 
	setup.py | 
	1 | 
	1568 | 
	from setuptools import setup, find_packages
from phonenumber_field import __version__
# Read the long description up front so the README handle is closed
# deterministically (the previous inline ``open('README.rst').read()``
# leaked the file object).
with open('README.rst') as readme:
    long_description = readme.read()

setup(
    name="django-phonenumber-field",
    version=__version__,
    url='http://github.com/stefanfoulis/django-phonenumber-field',
    license='BSD',
    platforms=['OS Independent'],
    description="An international phone number field for django models.",
    install_requires=[
        'phonenumbers>=7.0.2',
        'babel',
    ],
    long_description=long_description,
    author='Stefan Foulis',
    author_email='[email protected]',
    maintainer='Stefan Foulis',
    maintainer_email='[email protected]',
    packages=find_packages(),
    # PEP 8: no spaces around '=' for keyword arguments.
    package_data={
        'phonenumber_field': [
            'locale/*/LC_MESSAGES/*',
        ],
    },
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
 | 
	mit | -4,853,339,113,309,659,000 | 33.086957 | 73 | 0.604592 | false | 
| 
	quang-ha/lammps | 
	tools/moltemplate/moltemplate/remove_duplicate_atoms.py | 
	11 | 
	1467 | 
	#!/usr/bin/env python
"""
   Get rid of lines containing duplicate copies of the same atom in the "Atoms"
   section of a LAMMPS data file.  Duplicate lines which occur later are
   preserved and the earlier lines are erased.
   The file is read from sys.stdin.  This program does not parse the entire
   data file.  The text from the "Atoms" section of the LAMMPS file must
   be extracted in advance before it is sent to this program.)
"""
import sys
def main():
    """Drop earlier duplicates of atom lines, keeping the last occurrence.

    Reads lines from the file named in ``sys.argv[1]`` if given, otherwise
    from ``sys.stdin``.  The first whitespace-separated token of each line
    (after stripping any trailing '#' comment) is the atom id; when an id
    appears more than once, only the LAST such line survives.  Blank and
    comment-only lines are discarded.  Surviving lines are written to
    ``sys.stdout`` in their original order.
    """
    if len(sys.argv) == 2:
        # ``with`` guarantees the file is closed even if reading raises
        # (the original left the handle open on error, and compared with
        # ``!= None`` instead of ``is not None``).
        with open(sys.argv[1], 'r') as f:
            lines = f.readlines()
    else:
        lines = sys.stdin.readlines()
    # Scan from the end so the last occurrence of each atom id wins; this
    # also replaces the original O(n^2) in-place ``del lines[i]`` loop.
    seen_ids = set()
    kept = []
    for line_orig in reversed(lines):
        # Strip the newline and any trailing '#' comment before tokenizing.
        payload = line_orig.rstrip('\n').partition('#')[0]
        tokens = payload.split()
        if tokens and tokens[0] not in seen_ids:
            seen_ids.add(tokens[0])
            kept.append(line_orig)
    # ``kept`` was collected back-to-front; restore original order.
    for line_orig in reversed(kept):
        sys.stdout.write(line_orig)
# Run as a command-line filter when executed directly.
if __name__ == '__main__':
    main()
 | 
	gpl-2.0 | -9,153,415,108,029,633,000 | 23.45 | 82 | 0.558964 | false | 
| 
	hydroshare/hydroshare_temp | 
	hs_party/models/group_association.py | 
	1 | 
	2255 | 
	from django.contrib.contenttypes import generic
from django.contrib.auth.models import User, Group
from django.db import models
from mezzanine.pages.models import Page, RichText,Displayable
from mezzanine.core.fields import FileField, RichTextField
from mezzanine.core.models import Ownable
from mezzanine.generic.models import Keyword, Orderable
from hs_core.models import AbstractResource
from django.db.models.signals import  post_save
from datetime import date
from uuid import uuid4
from django.db.models.signals import post_save,pre_save,post_init
from django.contrib.auth.signals import user_logged_in
from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist,ValidationError
from django.core.urlresolvers import reverse
from .party import Party
from .party_types import PartyEmailModel,PartyGeolocation,PartyPhoneModel,PartyLocationModel
from .activities import ActivitiesModel
from .person import Person
from .organization import Organization
__author__ = 'valentin'
class GroupAssociation( ActivitiesModel):
    """Associates a Person with a Group, optionally bounded by a date range
    and annotated with the position the person held.
    """
    # unique, random identifier for this association record
    uniqueCode = models.CharField(max_length=64,default=lambda: str(uuid4()),verbose_name="A unique code for the record", help_text="A unique code for the record")
    group = models.ForeignKey(Group)
    person = models.ForeignKey(Person)
    beginDate = models.DateField(null=True,blank=True,verbose_name="begin date of associate, Empty is not know.")
    endDate = models.DateField(null=True,blank=True, verbose_name="End date of association. Empty if still with group")
    # BUG FIX: max_length was the string '100'; Django expects an int.
    positionName = models.CharField(verbose_name="Position, empty is not known", blank=True,max_length=100)
    def __unicode__(self):
        """Return 'person (group[, position][ [dates]])'."""
        if (self.beginDate):
            if (self.endDate):
                date_range = u' [%s, %s]' % (self.beginDate,self.endDate)
            else:
                date_range = u' [%s]' % (self.beginDate)
        else:
            date_range = ''
        # BUG FIX: the original referenced self.jobTitle (no such field) and
        # left 'title' unbound when the attribute was falsy.
        if (self.positionName):
            title = ' ,' + self.positionName
        else:
            title = ''
        return u'%s (%s%s%s)' % (self.person.name, self.group.name,title,date_range)
    class Meta:
        app_label = 'hs_party'
	bsd-3-clause | -6,585,716,426,862,214,000 | 40.777778 | 163 | 0.734368 | false | 
| 
	stewartsmith/bzr | 
	bzrlib/index.py | 
	2 | 
	80106 | 
	# Copyright (C) 2007-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Indexing facilities."""
from __future__ import absolute_import
__all__ = [
    'CombinedGraphIndex',
    'GraphIndex',
    'GraphIndexBuilder',
    'GraphIndexPrefixAdapter',
    'InMemoryGraphIndex',
    ]
from bisect import bisect_right
from cStringIO import StringIO
import re
import sys
from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import (
    bisect_multi,
    revision as _mod_revision,
    trace,
    )
""")
from bzrlib import (
    debug,
    errors,
    )
from bzrlib.static_tuple import StaticTuple
_HEADER_READV = (0, 200)
_OPTION_KEY_ELEMENTS = "key_elements="
_OPTION_LEN = "len="
_OPTION_NODE_REFS = "node_ref_lists="
_SIGNATURE = "Bazaar Graph Index 1\n"
_whitespace_re = re.compile('[\t\n\x0b\x0c\r\x00 ]')
_newline_null_re = re.compile('[\n\0]')
def _has_key_from_parent_map(self, key):
    """Check whether *key* is present in this index.
    When testing many keys, calling get_parent_map once with all of them
    should be faster than repeated single-key probes like this one.
    """
    parent_map = self.get_parent_map([key])
    return key in parent_map
def _missing_keys_from_parent_map(self, keys):
    """Return the subset of *keys* that this index does not contain."""
    present = set(self.get_parent_map(keys))
    return set(keys) - present
class GraphIndexBuilder(object):
    """A builder that can build a GraphIndex.
    The resulting graph has the structure::
      _SIGNATURE OPTIONS NODES NEWLINE
      _SIGNATURE     := 'Bazaar Graph Index 1' NEWLINE
      OPTIONS        := 'node_ref_lists=' DIGITS NEWLINE
      NODES          := NODE*
      NODE           := KEY NULL ABSENT? NULL REFERENCES NULL VALUE NEWLINE
      KEY            := Not-whitespace-utf8
      ABSENT         := 'a'
      REFERENCES     := REFERENCE_LIST (TAB REFERENCE_LIST){node_ref_lists - 1}
      REFERENCE_LIST := (REFERENCE (CR REFERENCE)*)?
      REFERENCE      := DIGITS  ; digits is the byte offset in the index of the
                                ; referenced key.
      VALUE          := no-newline-no-null-bytes
    """
    def __init__(self, reference_lists=0, key_elements=1):
        """Create a GraphIndex builder.
        :param reference_lists: The number of node references lists for each
            entry.
        :param key_elements: The number of bytestrings in each key.
        """
        self.reference_lists = reference_lists
        # A dict of {key: (absent, ref_lists, value)}
        self._nodes = {}
        # Keys that are referenced but not actually present in this index
        self._absent_keys = set()
        self._nodes_by_key = None
        self._key_length = key_elements
        self._optimize_for_size = False
        self._combine_backing_indices = True
    def _check_key(self, key):
        """Raise BadIndexKey if key is not a valid key for this index.
        A valid key is a tuple (or StaticTuple) of exactly self._key_length
        non-empty elements, none of which contain whitespace.
        """
        if type(key) not in (tuple, StaticTuple):
            raise errors.BadIndexKey(key)
        if self._key_length != len(key):
            raise errors.BadIndexKey(key)
        for element in key:
            # empty elements and whitespace would break the whitespace/NUL
            # delimited serialisation format (see the class docstring grammar)
            if not element or _whitespace_re.search(element) is not None:
                raise errors.BadIndexKey(element)
    def _external_references(self):
        """Return references that are not present in this index.
        :return: the set of keys named in the second reference list (per the
            TODO below, the compression-parent list for pack formats
            0.92-1.9) that have no entry of their own in this index.
        """
        keys = set()
        refs = set()
        # TODO: JAM 2008-11-21 This makes an assumption about how the reference
        #       lists are used. It is currently correct for pack-0.92 through
        #       1.9, which use the node references (3rd column) second
        #       reference list as the compression parent. Perhaps this should
        #       be moved into something higher up the stack, since it
        #       makes assumptions about how the index is used.
        if self.reference_lists > 1:
            for node in self.iter_all_entries():
                keys.add(node[1])
                refs.update(node[3][1])
            return refs - keys
        else:
            # If reference_lists == 0 there can be no external references, and
            # if reference_lists == 1, then there isn't a place to store the
            # compression parent
            return set()
    def _get_nodes_by_key(self):
        """Build (lazily) and return a nested-dict view of self._nodes.
        For a key (a, b, c) the view is nodes_by_key[a][b][c] -> entry,
        where the entry is (key, value, references) when this builder has
        reference lists and (key, value) otherwise.  Absent placeholder
        nodes are excluded.
        """
        if self._nodes_by_key is None:
            nodes_by_key = {}
            include_refs = bool(self.reference_lists)
            for key, (absent, references, value) in self._nodes.iteritems():
                if absent:
                    continue
                key_dict = nodes_by_key
                for subkey in key[:-1]:
                    key_dict = key_dict.setdefault(subkey, {})
                if include_refs:
                    key_dict[key[-1]] = key, value, references
                else:
                    key_dict[key[-1]] = key, value
            self._nodes_by_key = nodes_by_key
        return self._nodes_by_key
    def _update_nodes_by_key(self, key, value, node_refs):
        """Update the _nodes_by_key dict with a new key.
        For a key of (foo, bar, baz) create
        _nodes_by_key[foo][bar][baz] = key_value
        """
        if self._nodes_by_key is None:
            # the nested view has not been built yet; nothing to keep in sync
            return
        key_dict = self._nodes_by_key
        if self.reference_lists:
            key_value = StaticTuple(key, value, node_refs)
        else:
            key_value = StaticTuple(key, value)
        for subkey in key[:-1]:
            key_dict = key_dict.setdefault(subkey, {})
        key_dict[key[-1]] = key_value
    def _check_key_ref_value(self, key, references, value):
        """Check that 'key' and 'references' are all valid.
        :param key: A key tuple. Must conform to the key interface (be a tuple,
            be of the right length, not have any whitespace or nulls in any key
            element.)
        :param references: An iterable of reference lists. Something like
            [[(ref, key)], [(ref, key), (other, key)]]
        :param value: The value associate with this key. Must not contain
            newlines or null characters.
        :return: (node_refs, absent_references)
            * node_refs: basically a packed form of 'references' where all
              iterables are tuples
            * absent_references: reference keys that are not in self._nodes.
              This may contain duplicates if the same key is referenced in
              multiple lists.
        """
        as_st = StaticTuple.from_sequence
        self._check_key(key)
        if _newline_null_re.search(value) is not None:
            raise errors.BadIndexValue(value)
        if len(references) != self.reference_lists:
            raise errors.BadIndexValue(references)
        node_refs = []
        absent_references = []
        for reference_list in references:
            for reference in reference_list:
                # If reference *is* in self._nodes, then we know it has already
                # been checked.
                if reference not in self._nodes:
                    self._check_key(reference)
                    absent_references.append(reference)
            # pack into interned StaticTuples so equal keys share storage
            reference_list = as_st([as_st(ref).intern()
                                    for ref in reference_list])
            node_refs.append(reference_list)
        return as_st(node_refs), absent_references
    def add_node(self, key, value, references=()):
        """Add a node to the index.
        :param key: The key. keys are non-empty tuples containing
            as many whitespace-free utf8 bytestrings as the key length
            defined for this index.
        :param references: An iterable of iterables of keys. Each is a
            reference to another key.
        :param value: The value to associate with the key. It may be any
            bytes as long as it does not contain \\0 or \\n.
        :raises errors.BadIndexDuplicateKey: if a real (non-absent) node with
            this key was already added.
        """
        (node_refs,
         absent_references) = self._check_key_ref_value(key, references, value)
        # 'a' in the first slot marks an absent placeholder; replacing one
        # with a real node is allowed, replacing a real node is not.
        if key in self._nodes and self._nodes[key][0] != 'a':
            raise errors.BadIndexDuplicateKey(key, self)
        for reference in absent_references:
            # There may be duplicates, but I don't think it is worth worrying
            # about
            self._nodes[reference] = ('a', (), '')
        self._absent_keys.update(absent_references)
        self._absent_keys.discard(key)
        self._nodes[key] = ('', node_refs, value)
        if self._nodes_by_key is not None and self._key_length > 1:
            self._update_nodes_by_key(key, value, node_refs)
    def clear_cache(self):
        """See GraphIndex.clear_cache().
        This is a no-op, but we need the api to conform to a generic 'Index'
        abstraction.
        """
    def finish(self):
        """Serialise the index.
        :returns: cStringIO holding the full content of the index as it
            should be written to disk.
        :raises errors.BzrError: if the serialised length disagrees with the
            pre-computed expected length (internal consistency check).
        """
        # header: signature plus the three option lines
        lines = [_SIGNATURE]
        lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n')
        lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n')
        key_count = len(self._nodes) - len(self._absent_keys)
        lines.append(_OPTION_LEN + str(key_count) + '\n')
        prefix_length = sum(len(x) for x in lines)
        # references are byte offsets. To avoid having to do nasty
        # polynomial work to resolve offsets (references to later in the
        # file cannot be determined until all the inbetween references have
        # been calculated too) we pad the offsets with 0's to make them be
        # of consistent length. Using binary offsets would break the trivial
        # file parsing.
        # to calculate the width of zero's needed we do three passes:
        # one to gather all the non-reference data and the number of references.
        # one to pad all the data with reference-length and determine entry
        # addresses.
        # One to serialise.
        # forward sorted by key. In future we may consider topological sorting,
        # at the cost of table scans for direct lookup, or a second index for
        # direct lookup
        nodes = sorted(self._nodes.items())
        # if we do not prepass, we don't know how long it will be up front.
        expected_bytes = None
        # we only need to pre-pass if we have reference lists at all.
        if self.reference_lists:
            key_offset_info = []
            non_ref_bytes = prefix_length
            total_references = 0
            # TODO use simple multiplication for the constants in this loop.
            for key, (absent, references, value) in nodes:
                # record the offset known *so far* for this key:
                # the non reference bytes to date, and the total references to
                # date - saves reaccumulating on the second pass
                key_offset_info.append((key, non_ref_bytes, total_references))
                # key is literal, value is literal, there are 3 null's, 1 NL
                # key is variable length tuple, \x00 between elements
                non_ref_bytes += sum(len(element) for element in key)
                if self._key_length > 1:
                    non_ref_bytes += self._key_length - 1
                # value is literal bytes, there are 3 null's, 1 NL.
                non_ref_bytes += len(value) + 3 + 1
                # one byte for absent if set.
                if absent:
                    non_ref_bytes += 1
                elif self.reference_lists:
                    # (ref_lists -1) tabs
                    non_ref_bytes += self.reference_lists - 1
                    # (ref-1 cr's per ref_list)
                    for ref_list in references:
                        # how many references across the whole file?
                        total_references += len(ref_list)
                        # accrue reference separators
                        if ref_list:
                            non_ref_bytes += len(ref_list) - 1
            # how many digits are needed to represent the total byte count?
            digits = 1
            possible_total_bytes = non_ref_bytes + total_references*digits
            while 10 ** digits < possible_total_bytes:
                digits += 1
                possible_total_bytes = non_ref_bytes + total_references*digits
            expected_bytes = possible_total_bytes + 1 # terminating newline
            # resolve key addresses.
            key_addresses = {}
            for key, non_ref_bytes, total_references in key_offset_info:
                key_addresses[key] = non_ref_bytes + total_references*digits
            # serialise
            format_string = '%%0%sd' % digits
        for key, (absent, references, value) in nodes:
            flattened_references = []
            for ref_list in references:
                ref_addresses = []
                for reference in ref_list:
                    ref_addresses.append(format_string % key_addresses[reference])
                flattened_references.append('\r'.join(ref_addresses))
            string_key = '\x00'.join(key)
            lines.append("%s\x00%s\x00%s\x00%s\n" % (string_key, absent,
                '\t'.join(flattened_references), value))
        lines.append('\n')
        result = StringIO(''.join(lines))
        # sanity check: the pre-pass size estimate must match what we wrote
        if expected_bytes and len(result.getvalue()) != expected_bytes:
            raise errors.BzrError('Failed index creation. Internal error:'
                ' mismatched output length and expected length: %d %d' %
                (len(result.getvalue()), expected_bytes))
        return result
    def set_optimize(self, for_size=None, combine_backing_indices=None):
        """Change how the builder tries to optimize the result.
        :param for_size: Tell the builder to try and make the index as small as
            possible.
        :param combine_backing_indices: If the builder spills to disk to save
            memory, should the on-disk indices be combined. Set to True if you
            are going to be probing the index, but to False if you are not. (If
            you are not querying, then the time spent combining is wasted.)
        :return: None
        """
        # GraphIndexBuilder itself doesn't pay attention to the flag yet, but
        # other builders do.
        if for_size is not None:
            self._optimize_for_size = for_size
        if combine_backing_indices is not None:
            self._combine_backing_indices = combine_backing_indices
    def find_ancestry(self, keys, ref_list_num):
        """See CombinedGraphIndex.find_ancestry()"""
        pending = set(keys)
        parent_map = {}
        missing_keys = set()
        while pending:
            next_pending = set()
            for _, key, value, ref_lists in self.iter_entries(pending):
                parent_keys = ref_lists[ref_list_num]
                parent_map[key] = parent_keys
                next_pending.update([p for p in parent_keys if p not in
                                     parent_map])
                missing_keys.update(pending.difference(parent_map))
            pending = next_pending
        return parent_map, missing_keys
class GraphIndex(object):
    """An index for data with embedded graphs.
    The index maps keys to a list of key reference lists, and a value.
    Each node has the same number of key reference lists. Each key reference
    list can be empty or an arbitrary length. The value is an opaque NULL
    terminated string without any newlines. The storage of the index is
    hidden in the interface: keys and key references are always tuples of
    bytestrings, never the internal representation (e.g. dictionary offsets).
    It is presumed that the index will not be mutated - it is static data.
    Successive iter_all_entries calls will read the entire index each time.
    Additionally, iter_entries calls will read the index linearly until the
    desired keys are found. XXX: This must be fixed before the index is
    suitable for production use. :XXX
    """
    def __init__(self, transport, name, size, unlimited_cache=False, offset=0):
        """Open an index called name on transport.
        :param transport: A bzrlib.transport.Transport.
        :param name: A path to provide to transport API calls.
        :param size: The size of the index in bytes. This is used for bisection
            logic to perform partial index reads. While the size could be
            obtained by statting the file this introduced an additional round
            trip as well as requiring stat'able transports, both of which are
            avoided by having it supplied. If size is None, then bisection
            support will be disabled and accessing the index will just stream
            all the data.
        :param offset: Instead of starting the index data at offset 0, start it
            at an arbitrary offset.
        """
        self._transport = transport
        self._name = name
        # Becomes a dict of key:(value, reference-list-byte-locations) used by
        # the bisection interface to store parsed but not resolved keys.
        self._bisect_nodes = None
        # Becomes a dict of key:(value, reference-list-keys) which are ready to
        # be returned directly to callers.
        self._nodes = None
        # a sorted list of slice-addresses for the parsed bytes of the file.
        # e.g. (0,1) would mean that byte 0 is parsed.
        self._parsed_byte_map = []
        # a sorted list of keys matching each slice address for parsed bytes
        # e.g. (None, 'foo@bar') would mean that the first byte contained no
        # key, and the end byte of the slice is the end of the data for
        # 'foo@bar'
        self._parsed_key_map = []
        self._key_count = None
        self._keys_by_offset = None
        self._nodes_by_key = None
        self._size = size
        # The number of bytes we've read so far in trying to process this file
        self._bytes_read = 0
        # index data begins at this byte offset within the file
        self._base_offset = offset
    def __eq__(self, other):
        """Equal when self and other were created with the same parameters."""
        if type(self) != type(other):
            return False
        return (self._transport == other._transport
                and self._name == other._name
                and self._size == other._size)
    def __ne__(self, other):
        """Inverse of __eq__ (needed explicitly under Python 2)."""
        return not (self == other)
    def __repr__(self):
        """e.g. "GraphIndex('/path/to/index')", for debugging output."""
        path = self._transport.abspath(self._name)
        return "%s(%r)" % (self.__class__.__name__, path)
    def _buffer_all(self, stream=None):
        """Read and parse the entire index into memory.
        Mutates self._nodes and self._keys_by_offset; a no-op if the index
        has already been buffered.
        :param stream: optional already-open file-like object positioned at
            the start of the index data; opened from the transport when None.
        :raises errors.BadIndexData: if the terminating empty line is absent.
        """
        if self._nodes is not None:
            # We already did this
            return
        if 'index' in debug.debug_flags:
            trace.mutter('Reading entire index %s',
                          self._transport.abspath(self._name))
        if stream is None:
            stream = self._transport.get(self._name)
            if self._base_offset != 0:
                # This is wasteful, but it is better than dealing with
                # adjusting all the offsets, etc.
                stream = StringIO(stream.read()[self._base_offset:])
        self._read_prefix(stream)
        # key elements + value + absent marker + trailing empty field
        self._expected_elements = 3 + self._key_length
        line_count = 0
        # raw data keyed by offset
        self._keys_by_offset = {}
        # ready-to-return key:value or key:value, node_ref_lists
        self._nodes = {}
        self._nodes_by_key = None
        trailers = 0
        pos = stream.tell()
        lines = stream.read().split('\n')
        # GZ 2009-09-20: Should really use a try/finally block to ensure close
        stream.close()
        del lines[-1]
        _, _, _, trailers = self._parse_lines(lines, pos)
        for key, absent, references, value in self._keys_by_offset.itervalues():
            if absent:
                continue
            # resolve references:
            if self.node_ref_lists:
                node_value = (value, self._resolve_references(references))
            else:
                node_value = value
            self._nodes[key] = node_value
        # cache the keys for quick set intersections
        if trailers != 1:
            # there must be one line - the empty trailer line.
            raise errors.BadIndexData(self)
    def clear_cache(self):
        """Clear out any cached/memoized values.
        This can be called at any time, but generally it is used when we have
        extracted some information, but don't expect to be requesting any more
        from this index.  (A no-op here; subclasses may hold real caches.)
        """
    def external_references(self, ref_list_num):
        """Return references that are not present in this index.
        :param ref_list_num: zero-based index of the reference list to scan.
        :return: the set of keys referenced in that list which have no node
            of their own in this index.
        :raises ValueError: if the index has fewer reference lists.
        """
        # requires the whole index in memory so every node can be scanned
        self._buffer_all()
        if ref_list_num + 1 > self.node_ref_lists:
            raise ValueError('No ref list %d, index has %d ref lists'
                % (ref_list_num, self.node_ref_lists))
        refs = set()
        nodes = self._nodes
        for key, (value, ref_lists) in nodes.iteritems():
            ref_list = ref_lists[ref_list_num]
            refs.update([ref for ref in ref_list if ref not in nodes])
        return refs
    def _get_nodes_by_key(self):
        """Build (lazily) and return a nested-dict view of self._nodes.
        For a key (a, b, c) the view is nodes_by_key[a][b][c] -> entry, where
        entry is (key, value, references) when the index has reference lists
        and (key, value) otherwise.
        """
        if self._nodes_by_key is None:
            nodes_by_key = {}
            # _nodes values have a different shape depending on whether
            # reference lists exist, hence the two parallel loops
            if self.node_ref_lists:
                for key, (value, references) in self._nodes.iteritems():
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value, references
            else:
                for key, value in self._nodes.iteritems():
                    key_dict = nodes_by_key
                    for subkey in key[:-1]:
                        key_dict = key_dict.setdefault(subkey, {})
                    key_dict[key[-1]] = key, value
            self._nodes_by_key = nodes_by_key
        return self._nodes_by_key
    def iter_all_entries(self):
        """Iterate over all keys within the index.
        :return: An iterable of (index, key, value) or (index, key, value, reference_lists).
            The former tuple is used when there are no reference lists in the
            index, making the API compatible with simple key:value index types.
            There is no defined order for the result iteration - it will be in
            the most efficient order for the index.
        """
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(3,
                "iter_all_entries scales with size of history.")
        # requires the whole index buffered in memory
        if self._nodes is None:
            self._buffer_all()
        if self.node_ref_lists:
            for key, (value, node_ref_lists) in self._nodes.iteritems():
                yield self, key, value, node_ref_lists
        else:
            for key, value in self._nodes.iteritems():
                yield self, key, value
    def _read_prefix(self, stream):
        """Parse the index header from *stream*.
        Validates the signature line, then reads the node_ref_lists,
        key_elements and len option lines into the corresponding attributes.
        :raises errors.BadIndexFormatSignature: on a signature mismatch.
        :raises errors.BadIndexOptions: on any malformed option line.
        """
        signature = stream.read(len(self._signature()))
        if signature != self._signature():
            raise errors.BadIndexFormatSignature(self._name, GraphIndex)
        def read_int_option(prefix):
            # each option line is '<prefix><integer>\n'
            line = stream.readline()
            if not line.startswith(prefix):
                raise errors.BadIndexOptions(self)
            try:
                return int(line[len(prefix):-1])
            except ValueError:
                raise errors.BadIndexOptions(self)
        self.node_ref_lists = read_int_option(_OPTION_NODE_REFS)
        self._key_length = read_int_option(_OPTION_KEY_ELEMENTS)
        self._key_count = read_int_option(_OPTION_LEN)
    def _resolve_references(self, references):
        """Return the resolved key references for references.
        References are resolved by looking up the location of the key in the
        _keys_by_offset map and substituting the key name, preserving ordering.
        :param references: An iterable of iterables of key locations. e.g.
            [[123, 456], [123]]
        :return: A tuple of tuples of keys.
        """
        keys_by_offset = self._keys_by_offset
        resolved = []
        for ref_list in references:
            resolved.append(tuple(keys_by_offset[ref][0] for ref in ref_list))
        return tuple(resolved)
    def _find_index(self, range_map, key):
        """Helper for the _parsed_*_index calls.
        Given a range map - [(start, end), ...], finds the index of the range
        in the map for key if it is in the map, and if it is not there, the
        immediately preceding range in the map.
        """
        result = bisect_right(range_map, key) - 1
        candidate = result + 1
        # border condition: the key may start exactly at the next range
        if candidate < len(range_map) and range_map[candidate][0] == key[0]:
            return candidate
        return result
    def _parsed_byte_index(self, offset):
        """Return the index of the entry immediately before offset.
        e.g. if the parsed map has regions 0,10 and 11,12 parsed, meaning that
        there is one unparsed byte (the 11th, addressed as[10]). then:
        asking for 0 will return 0
        asking for 10 will return 0
        asking for 11 will return 1
        asking for 12 will return 1
        """
        return self._find_index(self._parsed_byte_map, (offset, 0))
    def _parsed_key_index(self, key):
        """Return the index of the entry immediately before key.
        e.g. if the parsed map has regions (None, 'a') and ('b','c') parsed,
        meaning that keys from None to 'a' inclusive, and 'b' to 'c' inclusive
        have been parsed, then:
        asking for '' will return 0
        asking for 'a' will return 0
        asking for 'b' will return 1
        asking for 'e' will return 1
        """
        return self._find_index(self._parsed_key_map, (key, None))
    def _is_parsed(self, offset):
        """Returns True if offset has been parsed."""
        index = self._parsed_byte_index(offset)
        if index == len(self._parsed_byte_map):
            # past the last region: compare against the final parsed region
            return offset < self._parsed_byte_map[index - 1][1]
        start, end = self._parsed_byte_map[index]
        return start <= offset < end
    def _iter_entries_from_total_buffer(self, keys):
        """Iterate over keys when the entire index is parsed."""
        # Note: See the note in BTreeBuilder.iter_entries for why we don't use
        #       .intersection() here
        nodes = self._nodes
        present = [key for key in keys if key in nodes]
        if self.node_ref_lists:
            for key in present:
                value, node_refs = nodes[key]
                yield self, key, value, node_refs
        else:
            for key in present:
                yield self, key, nodes[key]
    def iter_entries(self, keys):
        """Iterate over keys within the index.
        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys supplied. No additional keys will be returned, and every
            key supplied that is in the index will be returned.
        """
        keys = set(keys)
        if not keys:
            return []
        # with no size hint, bisection is impossible: buffer everything
        if self._size is None and self._nodes is None:
            self._buffer_all()
        # We fit about 20 keys per minimum-read (4K), so if we are looking for
        # more than 1/20th of the index its likely (assuming homogenous key
        # spread) that we'll read the entire index. If we're going to do that,
        # buffer the whole thing. A better analysis might take key spread into
        # account - but B+Tree indices are better anyway.
        # We could look at all data read, and use a threshold there, which will
        # trigger on ancestry walks, but that is not yet fully mapped out.
        if self._nodes is None and len(keys) * 20 > self.key_count():
            self._buffer_all()
        if self._nodes is not None:
            return self._iter_entries_from_total_buffer(keys)
        else:
            # partial reads via bisection; result[1] drops the hit/miss flag
            return (result[1] for result in bisect_multi.bisect_multi_bytes(
                self._lookup_keys_via_location, self._size, keys))
    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.

        WARNING: Note that this method currently causes a full index parse
        unconditionally (which is reasonably appropriate as it is a means for
        thunking many small indices into one larger one and still supplies
        iter_all_entries at the thunk layer).

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        keys = set(keys)
        if not keys:
            return
        # load data - also finds key lengths
        if self._nodes is None:
            self._buffer_all()
        if self._key_length == 1:
            # Single-element keys: a "prefix" is necessarily a whole key, so
            # each lookup is a plain dict access on the buffered nodes.
            for key in keys:
                # sanity check
                if key[0] is None:
                    raise errors.BadIndexKey(key)
                if len(key) != self._key_length:
                    raise errors.BadIndexKey(key)
                if self.node_ref_lists:
                    value, node_refs = self._nodes[key]
                    yield self, key, value, node_refs
                else:
                    yield self, key, self._nodes[key]
            return
        # Multi-element keys: walk the nested dict-of-dicts keyed on each
        # key element in turn.
        nodes_by_key = self._get_nodes_by_key()
        for key in keys:
            # sanity check
            if key[0] is None:
                raise errors.BadIndexKey(key)
            if len(key) != self._key_length:
                raise errors.BadIndexKey(key)
            # find what it refers to:
            key_dict = nodes_by_key
            elements = list(key)
            # find the subdict whose contents should be returned.
            try:
                while len(elements) and elements[0] is not None:
                    key_dict = key_dict[elements[0]]
                    elements.pop(0)
            except KeyError:
                # a non-existant lookup.
                continue
            if len(elements):
                # The prefix ended with one or more Nones: iterate the whole
                # subtree under key_dict, yielding every terminal entry.
                dicts = [key_dict]
                while dicts:
                    key_dict = dicts.pop(-1)
                    # can't be empty or would not exist
                    item, value = key_dict.iteritems().next()
                    if type(value) == dict:
                        # push keys
                        dicts.extend(key_dict.itervalues())
                    else:
                        # yield keys
                        for value in key_dict.itervalues():
                            # each value is the key:value:node refs tuple
                            # ready to yield.
                            yield (self, ) + value
            else:
                # the last thing looked up was a terminal element
                yield (self, ) + key_dict
    def _find_ancestors(self, keys, ref_list_num, parent_map, missing_keys):
        """See BTreeIndex._find_ancestors.

        Implemented as a trivial overlay on top of iter_entries; not an
        efficient implementation, but it at least gets the job done.
        """
        found_keys = set()
        search_keys = set()
        for index, key, value, refs in self.iter_entries(keys):
            parents = refs[ref_list_num]
            parent_map[key] = parents
            found_keys.add(key)
            search_keys.update(parents)
        # Anything asked for but not yielded above is missing.
        missing_keys.update(set(keys).difference(found_keys))
        # Don't hand back keys whose parents are already known.
        return search_keys.difference(parent_map)
    def key_count(self):
        """Return an estimate of the number of keys in this index.

        For GraphIndex the estimate is exact.
        """
        count = self._key_count
        if count is None:
            # The count lives in the header; parse just that region.
            self._read_and_parse([_HEADER_READV])
            count = self._key_count
        return count
    def _lookup_keys_via_location(self, location_keys):
        """Public interface for implementing bisection.

        If _buffer_all has been called, then all the data for the index is in
        memory, and this method should not be called, as it uses a separate
        cache because it cannot pre-resolve all indices, which buffer_all does
        for performance.

        :param location_keys: A list of location(byte offset), key tuples.
        :return: A list of (location_key, result) tuples as expected by
            bzrlib.bisect_multi.bisect_multi_bytes. A result of False means
            "definitely not present"; -1/+1 mean "bisect lower/higher"; a
            tuple is a full entry as per iter_all_entries.
        """
        # Possible improvements:
        #  - only bisect lookup each key once
        #  - sort the keys first, and use that to reduce the bisection window
        # -----
        # this progresses in three parts:
        # read data
        # parse it
        # attempt to answer the question from the now in memory data.
        # build the readv request
        # for each location, ask for 800 bytes - much more than rows we've seen
        # anywhere.
        readv_ranges = []
        for location, key in location_keys:
            # can we answer from cache?
            if self._bisect_nodes and key in self._bisect_nodes:
                # We have the key parsed.
                continue
            index = self._parsed_key_index(key)
            if (len(self._parsed_key_map) and
                self._parsed_key_map[index][0] <= key and
                (self._parsed_key_map[index][1] >= key or
                 # end of the file has been parsed
                 self._parsed_byte_map[index][1] == self._size)):
                # the key has been parsed, so no lookup is needed even if its
                # not present.
                continue
            # - if we have examined this part of the file already - yes
            index = self._parsed_byte_index(location)
            if (len(self._parsed_byte_map) and
                self._parsed_byte_map[index][0] <= location and
                self._parsed_byte_map[index][1] > location):
                # the byte region has been parsed, so no read is needed.
                continue
            length = 800
            if location + length > self._size:
                # never request bytes past the end of the file
                length = self._size - location
            # todo, trim out parsed locations.
            if length > 0:
                readv_ranges.append((location, length))
        # read the header if needed
        if self._bisect_nodes is None:
            readv_ranges.append(_HEADER_READV)
        self._read_and_parse(readv_ranges)
        result = []
        if self._nodes is not None:
            # _read_and_parse triggered a _buffer_all because we requested the
            # whole data range
            for location, key in location_keys:
                if key not in self._nodes: # not present
                    result.append(((location, key), False))
                elif self.node_ref_lists:
                    value, refs = self._nodes[key]
                    result.append(((location, key),
                        (self, key, value, refs)))
                else:
                    result.append(((location, key),
                        (self, key, self._nodes[key])))
            return result
        # generate results:
        #  - figure out <, >, missing, present
        #  - result present references so we can return them.
        # keys that we cannot answer until we resolve references
        pending_references = []
        pending_locations = set()
        for location, key in location_keys:
            # can we answer from cache?
            if key in self._bisect_nodes:
                # the key has been parsed, so no lookup is needed
                if self.node_ref_lists:
                    # the references may not have been all parsed.
                    value, refs = self._bisect_nodes[key]
                    wanted_locations = []
                    for ref_list in refs:
                        for ref in ref_list:
                            if ref not in self._keys_by_offset:
                                wanted_locations.append(ref)
                    if wanted_locations:
                        # defer answering this key until the referenced
                        # offsets have been read and parsed below.
                        pending_locations.update(wanted_locations)
                        pending_references.append((location, key))
                        continue
                    result.append(((location, key), (self, key,
                        value, self._resolve_references(refs))))
                else:
                    result.append(((location, key),
                        (self, key, self._bisect_nodes[key])))
                continue
            else:
                # has the region the key should be in, been parsed?
                index = self._parsed_key_index(key)
                if (self._parsed_key_map[index][0] <= key and
                    (self._parsed_key_map[index][1] >= key or
                     # end of the file has been parsed
                     self._parsed_byte_map[index][1] == self._size)):
                    result.append(((location, key), False))
                    continue
            # no, is the key above or below the probed location:
            # get the range of the probed & parsed location
            index = self._parsed_byte_index(location)
            # if the key is below the start of the range, its below
            if key < self._parsed_key_map[index][0]:
                direction = -1
            else:
                direction = +1
            result.append(((location, key), direction))
        readv_ranges = []
        # lookup data to resolve references
        for location in pending_locations:
            length = 800
            if location + length > self._size:
                length = self._size - location
            # TODO: trim out parsed locations (e.g. if the 800 is into the
            # parsed region trim it, and dont use the adjust_for_latency
            # facility)
            if length > 0:
                readv_ranges.append((location, length))
        self._read_and_parse(readv_ranges)
        if self._nodes is not None:
            # The _read_and_parse triggered a _buffer_all, grab the data and
            # return it
            for location, key in pending_references:
                value, refs = self._nodes[key]
                result.append(((location, key), (self, key, value, refs)))
            return result
        for location, key in pending_references:
            # answer key references we had to look-up-late.
            value, refs = self._bisect_nodes[key]
            result.append(((location, key), (self, key,
                value, self._resolve_references(refs))))
        return result
    def _parse_header_from_bytes(self, bytes):
        """Parse the header from a region of bytes.

        :param bytes: The data to parse.
        :return: An offset, data tuple such as readv yields, for the unparsed
            data. (which may length 0).
        """
        signature = bytes[0:len(self._signature())]
        if not signature == self._signature():
            raise errors.BadIndexFormatSignature(self._name, GraphIndex)
        lines = bytes[len(self._signature()):].splitlines()

        def parse_int_option(line, prefix):
            # Each header option line is '<prefix><integer>'.
            if not line.startswith(prefix):
                raise errors.BadIndexOptions(self)
            try:
                return int(line[len(prefix):])
            except ValueError:
                raise errors.BadIndexOptions(self)

        self.node_ref_lists = parse_int_option(lines[0], _OPTION_NODE_REFS)
        self._key_length = parse_int_option(lines[1], _OPTION_KEY_ELEMENTS)
        self._key_count = parse_int_option(lines[2], _OPTION_LEN)
        # The header spans the signature plus the three option lines and
        # their three trailing newlines.
        header_end = (len(signature) + len(lines[0]) + len(lines[1]) +
            len(lines[2]) + 3)
        self._parsed_bytes(0, None, header_end, None)
        # setup parsing state
        self._expected_elements = 3 + self._key_length
        # raw data keyed by offset
        self._keys_by_offset = {}
        # keys with the value and node references
        self._bisect_nodes = {}
        return header_end, bytes[header_end:]
    def _parse_region(self, offset, data):
        """Parse node data returned from a readv operation.

        :param offset: The byte offset the data starts at.
        :param data: The data to parse.
        """
        end = offset + len(data)
        parsed_up_to = offset
        while True:
            # Stop once the parsed range covering our high-water mark
            # already extends past the end of this data.
            index = self._parsed_byte_index(parsed_up_to)
            if end < self._parsed_byte_map[index][1]:
                return
            # Otherwise parse the next unparsed segment of the data.
            parsed_up_to, last_segment = self._parse_segment(
                offset, data, end, index)
            if last_segment:
                return
    def _parse_segment(self, offset, data, end, index):
        """Parse one segment of data.

        :param offset: Where 'data' begins in the file.
        :param data: Some data to parse a segment of.
        :param end: Where data ends
        :param index: The current index into the parsed bytes map.
        :return: high_parsed_byte, last_segment.
            high_parsed_byte is the location of the highest parsed byte in this
            segment, last_segment is True if the parsed segment is the last
            possible one in the data block.
        """
        # The segment is first trimmed so that only unparsed, whole lines
        # remain: overlap with already-parsed regions is dropped, and
        # partial lines at either end are cut at the nearest newline.
        # default is to use all data
        trim_end = None
        # accomodate overlap with data before this.
        if offset < self._parsed_byte_map[index][1]:
            # overlaps the lower parsed region
            # skip the parsed data
            trim_start = self._parsed_byte_map[index][1] - offset
            # don't trim the start for \n
            start_adjacent = True
        elif offset == self._parsed_byte_map[index][1]:
            # abuts the lower parsed region
            # use all data
            trim_start = None
            # do not trim anything
            start_adjacent = True
        else:
            # does not overlap the lower parsed region
            # use all data
            trim_start = None
            # but trim the leading \n
            start_adjacent = False
        if end == self._size:
            # lines up to the end of all data:
            # use it all
            trim_end = None
            # do not strip to the last \n
            end_adjacent = True
            last_segment = True
        elif index + 1 == len(self._parsed_byte_map):
            # at the end of the parsed data
            # use it all
            trim_end = None
            # but strip to the last \n
            end_adjacent = False
            last_segment = True
        elif end == self._parsed_byte_map[index + 1][0]:
            # buts up against the next parsed region
            # use it all
            trim_end = None
            # do not strip to the last \n
            end_adjacent = True
            last_segment = True
        elif end > self._parsed_byte_map[index + 1][0]:
            # overlaps into the next parsed region
            # only consider the unparsed data
            trim_end = self._parsed_byte_map[index + 1][0] - offset
            # do not strip to the last \n as we know its an entire record
            end_adjacent = True
            last_segment = end < self._parsed_byte_map[index + 1][1]
        else:
            # does not overlap into the next region
            # use it all
            trim_end = None
            # but strip to the last \n
            end_adjacent = False
            last_segment = True
        # now find bytes to discard if needed
        if not start_adjacent:
            # work around python bug in rfind
            if trim_start is None:
                trim_start = data.find('\n') + 1
            else:
                trim_start = data.find('\n', trim_start) + 1
            if not (trim_start != 0):
                raise AssertionError('no \n was present')
            # print 'removing start', offset, trim_start, repr(data[:trim_start])
        if not end_adjacent:
            # work around python bug in rfind
            if trim_end is None:
                trim_end = data.rfind('\n') + 1
            else:
                trim_end = data.rfind('\n', None, trim_end) + 1
            if not (trim_end != 0):
                raise AssertionError('no \n was present')
            # print 'removing end', offset, trim_end, repr(data[trim_end:])
        # adjust offset and data to the parseable data.
        trimmed_data = data[trim_start:trim_end]
        if not (trimmed_data):
            raise AssertionError('read unneeded data [%d:%d] from [%d:%d]'
                % (trim_start, trim_end, offset, offset + len(data)))
        if trim_start:
            offset += trim_start
        # print "parsing", repr(trimmed_data)
        # splitlines mangles the \r delimiters.. don't use it.
        lines = trimmed_data.split('\n')
        del lines[-1]
        pos = offset
        first_key, last_key, nodes, _ = self._parse_lines(lines, pos)
        # Record the parsed nodes and mark the byte range as parsed so
        # later probes can be answered from memory.
        for key, value in nodes:
            self._bisect_nodes[key] = value
        self._parsed_bytes(offset, first_key,
            offset + len(trimmed_data), last_key)
        return offset + len(trimmed_data), last_segment
    def _parse_lines(self, lines, pos):
        """Parse complete index lines into node records.

        :param lines: A list of lines (without trailing newlines) from a
            contiguous segment of the index file.
        :param pos: The byte offset in the file at which the first line
            starts.
        :return: (first_key, last_key, nodes, trailers). first_key and
            last_key are the first and last keys parsed (None if no entry
            lines were seen), nodes is a list of (key, node_value) tuples for
            entries not marked absent, and trailers counts the empty
            terminator lines encountered.
        """
        key = None
        first_key = None
        trailers = 0
        nodes = []
        for line in lines:
            if line == '':
                # must be at the end
                if self._size:
                    if not (self._size == pos + 1):
                        raise AssertionError("%s %s" % (self._size, pos))
                trailers += 1
                continue
            # Each line is NUL-separated: key elements, then absent marker,
            # references and value as the final three elements.
            elements = line.split('\0')
            if len(elements) != self._expected_elements:
                raise errors.BadIndexData(self)
            # keys are tuples. Each element is a string that may occur many
            # times, so we intern them to save space. AB, RC, 200807
            key = tuple([intern(element) for element in elements[:self._key_length]])
            if first_key is None:
                first_key = key
            absent, references, value = elements[-3:]
            ref_lists = []
            for ref_string in references.split('\t'):
                # Reference lists are TAB-separated; within a list, each
                # reference is a \r-separated byte offset into this file.
                ref_lists.append(tuple([
                    int(ref) for ref in ref_string.split('\r') if ref
                    ]))
            ref_lists = tuple(ref_lists)
            self._keys_by_offset[pos] = (key, absent, ref_lists, value)
            pos += len(line) + 1 # +1 for the \n
            if absent:
                # Absent entries are recorded by offset (for reference
                # resolution) but never returned as nodes.
                continue
            if self.node_ref_lists:
                node_value = (value, ref_lists)
            else:
                node_value = value
            nodes.append((key, node_value))
            # print "parsed ", key
        return first_key, key, nodes, trailers
    def _parsed_bytes(self, start, start_key, end, end_key):
        """Mark the bytes from start to end as parsed.

        Calling self._parsed_bytes(1, None, 2, None) will mark one byte (the
        one at offset 1) as parsed.

        :param start: The start of the parsed region.
        :param start_key: The first key parsed within the region (or None).
        :param end: The end of the parsed region.
        :param end_key: The last key parsed within the region (or None).
        """
        index = self._parsed_byte_index(start)
        new_value = (start, end)
        new_key = (start_key, end_key)
        if index == -1:
            # first range parsed is always the beginning.
            # NOTE(review): insert(-1, ...) appends only when the maps are
            # empty; presumably index == -1 occurs solely before anything has
            # been parsed - confirm against _parsed_byte_index.
            self._parsed_byte_map.insert(index, new_value)
            self._parsed_key_map.insert(index, new_key)
            return
        # four cases:
        # new region
        # extend lower region
        # extend higher region
        # combine two regions
        # _parsed_byte_map and _parsed_key_map are kept aligned entry for
        # entry throughout.
        if (index + 1 < len(self._parsed_byte_map) and
            self._parsed_byte_map[index][1] == start and
            self._parsed_byte_map[index + 1][0] == end):
            # combine two regions
            self._parsed_byte_map[index] = (self._parsed_byte_map[index][0],
                self._parsed_byte_map[index + 1][1])
            self._parsed_key_map[index] = (self._parsed_key_map[index][0],
                self._parsed_key_map[index + 1][1])
            del self._parsed_byte_map[index + 1]
            del self._parsed_key_map[index + 1]
        elif self._parsed_byte_map[index][1] == start:
            # extend the lower entry
            self._parsed_byte_map[index] = (
                self._parsed_byte_map[index][0], end)
            self._parsed_key_map[index] = (
                self._parsed_key_map[index][0], end_key)
        elif (index + 1 < len(self._parsed_byte_map) and
            self._parsed_byte_map[index + 1][0] == end):
            # extend the higher entry
            self._parsed_byte_map[index + 1] = (
                start, self._parsed_byte_map[index + 1][1])
            self._parsed_key_map[index + 1] = (
                start_key, self._parsed_key_map[index + 1][1])
        else:
            # new entry
            self._parsed_byte_map.insert(index + 1, new_value)
            self._parsed_key_map.insert(index + 1, new_key)
    def _read_and_parse(self, readv_ranges):
        """Read the ranges and parse the resulting data.

        :param readv_ranges: A prepared readv range list.
        """
        if not readv_ranges:
            return
        if self._nodes is None and self._bytes_read * 2 >= self._size:
            # We've already read more than 50% of the file and we are about to
            # request more data, just _buffer_all() and be done
            self._buffer_all()
            return
        base_offset = self._base_offset
        if base_offset != 0:
            # Rewrite the ranges for the offset
            readv_ranges = [(start+base_offset, size)
                            for start, size in readv_ranges]
        readv_data = self._transport.readv(self._name, readv_ranges, True,
            self._size + self._base_offset)
        # parse
        for offset, data in readv_data:
            # translate file offsets back to index-local offsets
            offset -= base_offset
            self._bytes_read += len(data)
            if offset < 0:
                # transport.readv() expanded to extra data which isn't part of
                # this index
                data = data[-offset:]
                offset = 0
            if offset == 0 and len(data) == self._size:
                # We read the whole range, most likely because the
                # Transport upcast our readv ranges into one long request
                # for enough total data to grab the whole index.
                self._buffer_all(StringIO(data))
                return
            if self._bisect_nodes is None:
                # this must be the start
                if not (offset == 0):
                    raise AssertionError()
                # header parsing also initialises the bisection caches
                offset, data = self._parse_header_from_bytes(data)
            # print readv_ranges, "[%d:%d]" % (offset, offset + len(data))
            self._parse_region(offset, data)
    def _signature(self):
        """Return the on-disk file signature for this index type."""
        return _SIGNATURE
    def validate(self):
        """Validate that everything in the index can be accessed."""
        # iter_all_entries performs a complete parse and access check, so
        # simply exhausting it is sufficient validation for now.
        for entry in self.iter_all_entries():
            pass
class CombinedGraphIndex(object):
    """A GraphIndex made up from smaller GraphIndices.
    The backing indices must implement GraphIndex, and are presumed to be
    static data.
    Queries against the combined index will be made against the first index,
    and then the second and so on. The order of indices can thus influence
    performance significantly. For example, if one index is on local disk and a
    second on a remote server, the local disk index should be before the other
    in the index list.
    
    Also, queries tend to need results from the same indices as previous
    queries.  So the indices will be reordered after every query to put the
    indices that had the result(s) of that query first (while otherwise
    preserving the relative ordering).
    """
    def __init__(self, indices, reload_func=None):
        """Create a CombinedGraphIndex backed by indices.

        :param indices: An ordered list of indices to query for data.
        :param reload_func: A function to call if we find we are missing an
            index. Should have the form reload_func() => True/False to
            indicate if reloading actually changed anything.
        """
        self._indices = indices
        self._reload_func = reload_func
        # Sibling indices are other CombinedGraphIndex that we should call
        # _move_to_front_by_name on when we auto-reorder ourself.
        self._sibling_indices = []
        # _index_names[i] names _indices[i] (e.g. a pack name); siblings must
        # all use the same naming scheme for reorderings to propagate.
        self._index_names = [None for _ in self._indices]
    def __repr__(self):
        """Show the class name and the repr of each backing index."""
        contents = ', '.join(repr(backing) for backing in self._indices)
        return "%s(%s)" % (self.__class__.__name__, contents)
    def clear_cache(self):
        """See GraphIndex.clear_cache()."""
        # Simply delegate to every backing index.
        for backing in self._indices:
            backing.clear_cache()
    def get_parent_map(self, keys):
        """See graph.StackedParentsProvider.get_parent_map"""
        search_keys = set(keys)
        found_parents = {}
        if _mod_revision.NULL_REVISION in search_keys:
            # NULL_REVISION is answered directly rather than looked up.
            search_keys.discard(_mod_revision.NULL_REVISION)
            found_parents[_mod_revision.NULL_REVISION] = []
        for index, key, value, refs in self.iter_entries(search_keys):
            # A node with an empty parent list is reported as descending
            # from NULL_REVISION.
            parents = refs[0] or (_mod_revision.NULL_REVISION,)
            found_parents[key] = parents
        return found_parents
    # has_key is supplied by the module-level _has_key_from_parent_map
    # helper (defined elsewhere in this module), which presumably answers
    # membership via get_parent_map - confirm at the definition site.
    has_key = _has_key_from_parent_map
    def insert_index(self, pos, index, name=None):
        """Insert a new index in the list of indices to query.

        :param pos: The position to insert the index.
        :param index: The index to insert.
        :param name: a name for this index, e.g. a pack name.  These names
            can be used to reflect index reorderings to related
            CombinedGraphIndex instances that use the same names.  (see
            set_sibling_indices)
        """
        # _indices and _index_names must stay aligned position-for-position.
        self._index_names.insert(pos, name)
        self._indices.insert(pos, index)
    def iter_all_entries(self):
        """Iterate over all keys within the index

        Duplicate keys across child indices are presumed to have the same
        value and are only reported once.

        :return: An iterable of (index, key, reference_lists, value).
            There is no defined order for the result iteration - it will be in
            the most efficient order for the index.
        """
        seen_keys = set()
        while True:
            try:
                for backing in self._indices:
                    for node in backing.iter_all_entries():
                        key = node[1]
                        if key in seen_keys:
                            # Already reported from an earlier index.
                            continue
                        seen_keys.add(key)
                        yield node
                return
            except errors.NoSuchFile:
                # A backing index vanished; reload and retry from scratch.
                self._reload_or_raise()
    def iter_entries(self, keys):
        """Iterate over keys within the index.

        Duplicate keys across child indices are presumed to have the same
        value and are only reported once.

        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, reference_lists, value). There is
            no defined order for the result iteration - it will be in the most
            efficient order for the index.
        """
        unanswered = set(keys)
        hit_indices = []
        while True:
            try:
                for backing in self._indices:
                    if not unanswered:
                        # Every requested key has been answered already.
                        break
                    found_any = False
                    for node in backing.iter_entries(unanswered):
                        # Don't ask later indices for keys already answered.
                        unanswered.remove(node[1])
                        found_any = True
                        yield node
                    if found_any:
                        hit_indices.append(backing)
                break
            except errors.NoSuchFile:
                self._reload_or_raise()
        # Promote the indices that answered this query, to speed up the
        # (likely similar) next query.
        self._move_to_front(hit_indices)
    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.

        Duplicate keys across child indices are presumed to have the same
        value and are only reported once.

        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None)
        then only the former key is returned.

        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key,
            but with the last N elements 'None' rather than a regular
            bytestring. The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        prefixes = set(keys)
        if not prefixes:
            return
        seen_keys = set()
        hit_indices = []
        while True:
            try:
                for backing in self._indices:
                    found_any = False
                    for node in backing.iter_entries_prefix(prefixes):
                        key = node[1]
                        if key in seen_keys:
                            # Already reported from an earlier index.
                            continue
                        seen_keys.add(key)
                        found_any = True
                        yield node
                    if found_any:
                        hit_indices.append(backing)
                break
            except errors.NoSuchFile:
                self._reload_or_raise()
        # Promote the indices that produced matches for future queries.
        self._move_to_front(hit_indices)
    def _move_to_front(self, hit_indices):
        """Rearrange self._indices so that hit_indices are first.

        Order is maintained as much as possible, e.g. the first unhit index
        will be the first index in _indices after the hit_indices, and the
        hit_indices will be present in exactly the order they are passed to
        _move_to_front.

        _move_to_front propagates to all objects in self._sibling_indices by
        calling _move_to_front_by_name.
        """
        if self._indices[:len(hit_indices)] == hit_indices:
            # Already at the front, in the requested order - nothing to do.
            return
        hit_names = self._move_to_front_by_index(hit_indices)
        # Keep sibling CombinedGraphIndex objects ordered consistently.
        for sibling in self._sibling_indices:
            sibling._move_to_front_by_name(hit_names)
    def _move_to_front_by_index(self, hit_indices):
        """Core logic for _move_to_front.

        Returns a list of names corresponding to the hit_indices param.
        """
        indices_info = zip(self._index_names, self._indices)
        if 'index' in debug.debug_flags:
            trace.mutter('CombinedGraphIndex reordering: currently %r, '
                         'promoting %r', indices_info, hit_indices)
        hit_names = []
        unhit_names = []
        new_hit_indices = []
        unhit_indices = []
        for offset, (name, idx) in enumerate(indices_info):
            if idx not in hit_indices:
                unhit_names.append(name)
                unhit_indices.append(idx)
                continue
            hit_names.append(name)
            new_hit_indices.append(idx)
            if len(new_hit_indices) == len(hit_indices):
                # All hits located - everything after this offset is unhit.
                unhit_names.extend(self._index_names[offset + 1:])
                unhit_indices.extend(self._indices[offset + 1:])
                break
        # Hits first (in hit order), then the unhit indices in their
        # original relative order.
        self._indices = new_hit_indices + unhit_indices
        self._index_names = hit_names + unhit_names
        if 'index' in debug.debug_flags:
            trace.mutter('CombinedGraphIndex reordered: %r', self._indices)
        return hit_names
    def _move_to_front_by_name(self, hit_names):
        """Moves indices named by 'hit_names' to front of the search order, as
        described in _move_to_front.
        """
        # Translate names to index instances, and then call
        # _move_to_front_by_index.
        indices_info = zip(self._index_names, self._indices)
        hit_indices = []
        for name, idx in indices_info:
            if name in hit_names:
                hit_indices.append(idx)
        self._move_to_front_by_index(hit_indices)
    def find_ancestry(self, keys, ref_list_num):
        """Find the complete ancestry for the given set of keys.
        Note that this is a whole-ancestry request, so it should be used
        sparingly.
        :param keys: An iterable of keys to look for
        :param ref_list_num: The reference list which references the parents
            we care about.
        :return: (parent_map, missing_keys)
        """
        # XXX: make this call _move_to_front?
        missing_keys = set()
        parent_map = {}
        keys_to_lookup = set(keys)
        generation = 0
        while keys_to_lookup:
            # keys that *all* indexes claim are missing, stop searching them
            generation += 1
            all_index_missing = None
            # print 'gen\tidx\tsub\tn_keys\tn_pmap\tn_miss'
            # print '%4d\t\t\t%4d\t%5d\t%5d' % (generation, len(keys_to_lookup),
            #                                   len(parent_map),
            #                                   len(missing_keys))
            for index_idx, index in enumerate(self._indices):
                # TODO: we should probably be doing something with
                #       'missing_keys' since we've already determined that
                #       those revisions have not been found anywhere
                index_missing_keys = set()
                # Find all of the ancestry we can from this index
                # keep looking until the search_keys set is empty, which means
                # things we didn't find should be in index_missing_keys
                search_keys = keys_to_lookup
                sub_generation = 0
                # print '    \t%2d\t\t%4d\t%5d\t%5d' % (
                #     index_idx, len(search_keys),
                #     len(parent_map), len(index_missing_keys))
                while search_keys:
                    sub_generation += 1
                    # TODO: ref_list_num should really be a parameter, since
                    #       CombinedGraphIndex does not know what the ref lists
                    #       mean.
                    search_keys = index._find_ancestors(search_keys,
                        ref_list_num, parent_map, index_missing_keys)
                    # print '    \t  \t%2d\t%4d\t%5d\t%5d' % (
                    #     sub_generation, len(search_keys),
                    #     len(parent_map), len(index_missing_keys))
                # Now set whatever was missing to be searched in the next index
                keys_to_lookup = index_missing_keys
                if all_index_missing is None:
                    all_index_missing = set(index_missing_keys)
                else:
                    all_index_missing.intersection_update(index_missing_keys)
                if not keys_to_lookup:
                    break
            if all_index_missing is None:
                # There were no indexes, so all search keys are 'missing'
                missing_keys.update(keys_to_lookup)
                keys_to_lookup = None
            else:
                missing_keys.update(all_index_missing)
                keys_to_lookup.difference_update(all_index_missing)
        return parent_map, missing_keys
    def key_count(self):
        """Return an estimate of the number of keys in this index.
        For CombinedGraphIndex this is approximated by the sum of the keys of
        the child indices. As child indices may have duplicate keys this can
        have a maximum error of the number of child indices * largest number of
        keys in any index.
        """
        while True:
            try:
                return sum((index.key_count() for index in self._indices), 0)
            except errors.NoSuchFile:
                self._reload_or_raise()
    missing_keys = _missing_keys_from_parent_map
    def _reload_or_raise(self):
        """We just got a NoSuchFile exception.
        Try to reload the indices, if it fails, just raise the current
        exception.
        """
        if self._reload_func is None:
            raise
        exc_type, exc_value, exc_traceback = sys.exc_info()
        trace.mutter('Trying to reload after getting exception: %s',
                     exc_value)
        if not self._reload_func():
            # We tried to reload, but nothing changed, so we fail anyway
            trace.mutter('_reload_func indicated nothing has changed.'
                         ' Raising original exception.')
            raise exc_type, exc_value, exc_traceback
    def set_sibling_indices(self, sibling_combined_graph_indices):
        """Set the CombinedGraphIndex objects to reorder after reordering self.
        """
        self._sibling_indices = sibling_combined_graph_indices
    def validate(self):
        """Validate that everything in the index can be accessed."""
        while True:
            try:
                for index in self._indices:
                    index.validate()
                return
            except errors.NoSuchFile:
                self._reload_or_raise()
class InMemoryGraphIndex(GraphIndexBuilder):
    """A GraphIndex which operates entirely out of memory and is mutable.
    This is designed to allow the accumulation of GraphIndex entries during a
    single write operation, where the accumulated entries need to be immediately
    available - for example via a CombinedGraphIndex.
    """
    def add_nodes(self, nodes):
        """Add nodes to the index.
        :param nodes: An iterable of (key, node_refs, value) entries to add.
        """
        if self.reference_lists:
            for (key, value, node_refs) in nodes:
                self.add_node(key, value, node_refs)
        else:
            for (key, value) in nodes:
                self.add_node(key, value)
    def iter_all_entries(self):
        """Iterate over all keys within the index
        :return: An iterable of (index, key, reference_lists, value). There is no
            defined order for the result iteration - it will be in the most
            efficient order for the index (in this case dictionary hash order).
        """
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(3,
                "iter_all_entries scales with size of history.")
        if self.reference_lists:
            for key, (absent, references, value) in self._nodes.iteritems():
                if not absent:
                    yield self, key, value, references
        else:
            for key, (absent, references, value) in self._nodes.iteritems():
                if not absent:
                    yield self, key, value
    def iter_entries(self, keys):
        """Iterate over keys within the index.
        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, value, reference_lists). There is no
            defined order for the result iteration - it will be in the most
            efficient order for the index (keys iteration order in this case).
        """
        # Note: See BTreeBuilder.iter_entries for an explanation of why we
        #       aren't using set().intersection() here
        nodes = self._nodes
        keys = [key for key in keys if key in nodes]
        if self.reference_lists:
            for key in keys:
                node = nodes[key]
                if not node[0]:
                    yield self, key, node[2], node[1]
        else:
            for key in keys:
                node = nodes[key]
                if not node[0]:
                    yield self, key, node[2]
    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.
        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.
        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        # XXX: To much duplication with the GraphIndex class; consider finding
        # a good place to pull out the actual common logic.
        keys = set(keys)
        if not keys:
            return
        if self._key_length == 1:
            for key in keys:
                # sanity check
                if key[0] is None:
                    raise errors.BadIndexKey(key)
                if len(key) != self._key_length:
                    raise errors.BadIndexKey(key)
                node = self._nodes[key]
                if node[0]:
                    continue
                if self.reference_lists:
                    yield self, key, node[2], node[1]
                else:
                    yield self, key, node[2]
            return
        nodes_by_key = self._get_nodes_by_key()
        for key in keys:
            # sanity check
            if key[0] is None:
                raise errors.BadIndexKey(key)
            if len(key) != self._key_length:
                raise errors.BadIndexKey(key)
            # find what it refers to:
            key_dict = nodes_by_key
            elements = list(key)
            # find the subdict to return
            try:
                while len(elements) and elements[0] is not None:
                    key_dict = key_dict[elements[0]]
                    elements.pop(0)
            except KeyError:
                # a non-existant lookup.
                continue
            if len(elements):
                dicts = [key_dict]
                while dicts:
                    key_dict = dicts.pop(-1)
                    # can't be empty or would not exist
                    item, value = key_dict.iteritems().next()
                    if type(value) == dict:
                        # push keys
                        dicts.extend(key_dict.itervalues())
                    else:
                        # yield keys
                        for value in key_dict.itervalues():
                            yield (self, ) + value
            else:
                yield (self, ) + key_dict
    def key_count(self):
        """Return an estimate of the number of keys in this index.
        For InMemoryGraphIndex the estimate is exact.
        """
        return len(self._nodes) - len(self._absent_keys)
    def validate(self):
        """In memory index's have no known corruption at the moment."""
class GraphIndexPrefixAdapter(object):
    """An adapter between GraphIndex with different key lengths.
    Queries against this will emit queries against the adapted Graph with the
    prefix added, queries for all items use iter_entries_prefix. The returned
    nodes will have their keys and node references adjusted to remove the
    prefix. Finally, an add_nodes_callback can be supplied - when called the
    nodes and references being added will have prefix prepended.
    """
    def __init__(self, adapted, prefix, missing_key_length,
        add_nodes_callback=None):
        """Construct an adapter against adapted with prefix."""
        self.adapted = adapted
        self.prefix_key = prefix + (None,)*missing_key_length
        self.prefix = prefix
        self.prefix_len = len(prefix)
        self.add_nodes_callback = add_nodes_callback
    def add_nodes(self, nodes):
        """Add nodes to the index.
        :param nodes: An iterable of (key, node_refs, value) entries to add.
        """
        # save nodes in case its an iterator
        nodes = tuple(nodes)
        translated_nodes = []
        try:
            # Add prefix_key to each reference node_refs is a tuple of tuples,
            # so split it apart, and add prefix_key to the internal reference
            for (key, value, node_refs) in nodes:
                adjusted_references = (
                    tuple(tuple(self.prefix + ref_node for ref_node in ref_list)
                        for ref_list in node_refs))
                translated_nodes.append((self.prefix + key, value,
                    adjusted_references))
        except ValueError:
            # XXX: TODO add an explicit interface for getting the reference list
            # status, to handle this bit of user-friendliness in the API more
            # explicitly.
            for (key, value) in nodes:
                translated_nodes.append((self.prefix + key, value))
        self.add_nodes_callback(translated_nodes)
    def add_node(self, key, value, references=()):
        """Add a node to the index.
        :param key: The key. keys are non-empty tuples containing
            as many whitespace-free utf8 bytestrings as the key length
            defined for this index.
        :param references: An iterable of iterables of keys. Each is a
            reference to another key.
        :param value: The value to associate with the key. It may be any
            bytes as long as it does not contain \0 or \n.
        """
        self.add_nodes(((key, value, references), ))
    def _strip_prefix(self, an_iter):
        """Strip prefix data from nodes and return it."""
        for node in an_iter:
            # cross checks
            if node[1][:self.prefix_len] != self.prefix:
                raise errors.BadIndexData(self)
            for ref_list in node[3]:
                for ref_node in ref_list:
                    if ref_node[:self.prefix_len] != self.prefix:
                        raise errors.BadIndexData(self)
            yield node[0], node[1][self.prefix_len:], node[2], (
                tuple(tuple(ref_node[self.prefix_len:] for ref_node in ref_list)
                for ref_list in node[3]))
    def iter_all_entries(self):
        """Iterate over all keys within the index
        iter_all_entries is implemented against the adapted index using
        iter_entries_prefix.
        :return: An iterable of (index, key, reference_lists, value). There is no
            defined order for the result iteration - it will be in the most
            efficient order for the index (in this case dictionary hash order).
        """
        return self._strip_prefix(self.adapted.iter_entries_prefix([self.prefix_key]))
    def iter_entries(self, keys):
        """Iterate over keys within the index.
        :param keys: An iterable providing the keys to be retrieved.
        :return: An iterable of (index, key, value, reference_lists). There is no
            defined order for the result iteration - it will be in the most
            efficient order for the index (keys iteration order in this case).
        """
        return self._strip_prefix(self.adapted.iter_entries(
            self.prefix + key for key in keys))
    def iter_entries_prefix(self, keys):
        """Iterate over keys within the index using prefix matching.
        Prefix matching is applied within the tuple of a key, not to within
        the bytestring of each key element. e.g. if you have the keys ('foo',
        'bar'), ('foobar', 'gam') and do a prefix search for ('foo', None) then
        only the former key is returned.
        :param keys: An iterable providing the key prefixes to be retrieved.
            Each key prefix takes the form of a tuple the length of a key, but
            with the last N elements 'None' rather than a regular bytestring.
            The first element cannot be 'None'.
        :return: An iterable as per iter_all_entries, but restricted to the
            keys with a matching prefix to those supplied. No additional keys
            will be returned, and every match that is in the index will be
            returned.
        """
        return self._strip_prefix(self.adapted.iter_entries_prefix(
            self.prefix + key for key in keys))
    def key_count(self):
        """Return an estimate of the number of keys in this index.
        For GraphIndexPrefixAdapter this is relatively expensive - key
        iteration with the prefix is done.
        """
        return len(list(self.iter_all_entries()))
    def validate(self):
        """Call the adapted's validate."""
        self.adapted.validate()
 | 
	gpl-2.0 | -1,710,965,484,435,133,000 | 41.883298 | 92 | 0.56449 | false | 
| 
	scalable-networks/ext | 
	gnuradio-3.7.0.1/gr-trellis/examples/python/test_turbo_equalization1.py | 
	13 | 
	5480 | 
	#!/usr/bin/env python
from gnuradio import gr
from gnuradio import trellis, digital, filter, blocks
from gnuradio import eng_notation
import math
import sys
import random
import fsm_utils
try:
    from gnuradio import analog
except ImportError:
    sys.stderr.write("Error: Program requires gr-analog.\n")
    sys.exit(1)
def make_rx(tb,fo,fi,dimensionality,tot_constellation,K,interleaver,IT,Es,N0,type):
    metrics_in = trellis.metrics_f(fi.O(),dimensionality,tot_constellation,digital.TRELLIS_EUCLIDEAN) # data preprocessing to generate metrics for innner SISO
    scale = blocks.multiply_const_ff(1.0/N0)
    gnd = blocks.vector_source_f([0],True);
    inter=[]
    deinter=[]
    siso_in=[]
    siso_out=[]
    # generate all blocks
    for it in range(IT):
      inter.append( trellis.permutation(interleaver.K(),interleaver.INTER(),fi.I(),gr.sizeof_float) )
      siso_in.append( trellis.siso_f(fi,K,0,-1,True,False,type) )
      deinter.append( trellis.permutation(interleaver.K(),interleaver.DEINTER(),fi.I(),gr.sizeof_float) )
      if it < IT-1:
        siso_out.append( trellis.siso_f(fo,K,0,-1,False,True,type) )
      else:
        siso_out.append( trellis.viterbi_s(fo,K,0,-1) ) # no soft outputs needed
    # connect first stage
    tb.connect (gnd,inter[0])
    tb.connect (metrics_in,scale)
    tb.connect (scale,(siso_in[0],1))
    # connect the rest
    for it in range(IT):
      if it < IT-1:
        tb.connect (scale,(siso_in[it+1],1))
        tb.connect (siso_in[it],deinter[it],(siso_out[it],1))
        tb.connect (gnd,(siso_out[it],0))
        tb.connect (siso_out[it],inter[it+1])
        tb.connect (inter[it],(siso_in[it],0))
      else:
        tb.connect (siso_in[it],deinter[it],siso_out[it])
        tb.connect (inter[it],(siso_in[it],0))
    return (metrics_in,siso_out[IT-1])
def run_test (fo,fi,interleaver,Kb,bitspersymbol,K,channel,modulation,dimensionality,tot_constellation,Es,N0,IT,seed):
    tb = gr.top_block ()
    L = len(channel)
    # TX
    # this for loop is TOO slow in python!!!
    packet = [0]*(K)
    random.seed(seed)
    for i in range(len(packet)):
        packet[i] = random.randint(0, 2**bitspersymbol - 1) # random symbols
    src = blocks.vector_source_s(packet,False)
    enc_out = trellis.encoder_ss(fo,0) # initial state = 0
    inter = trellis.permutation(interleaver.K(),interleaver.INTER(),1,gr.sizeof_short)
    mod = digital.chunks_to_symbols_sf(modulation[1],modulation[0])
    # CHANNEL
    isi = filter.fir_filter_fff(1,channel)
    add = blocks.add_ff()
    noise = analog.noise_source_f(analog.GR_GAUSSIAN,math.sqrt(N0/2),seed)
    # RX
    (head,tail) = make_rx(tb,fo,fi,dimensionality,tot_constellation,K,interleaver,IT,Es,N0,trellis.TRELLIS_MIN_SUM)
    dst = blocks.vector_sink_s();
    tb.connect (src,enc_out,inter,mod)
    tb.connect (mod,isi,(add,0))
    tb.connect (noise,(add,1))
    tb.connect (add,head)
    tb.connect (tail,dst)
    tb.run()
    data = dst.data()
    ntotal = len(data)
    nright=0
    for i in range(ntotal):
        if packet[i]==data[i]:
            nright=nright+1
        #else:
            #print "Error in ", i
    return (ntotal,ntotal-nright)
def main(args):
    nargs = len (args)
    if nargs == 3:
        fname_out=args[0]
        esn0_db=float(args[1])
        rep=int(args[2])
    else:
        sys.stderr.write ('usage: test_turbo_equalization.py fsm_name_out Es/No_db  repetitions\n')
        sys.exit (1)
    # system parameters
    Kb=64*16  # packet size in bits (multiple of 16)
    modulation = fsm_utils.pam4 # see fsm_utlis.py for available predefined modulations
    channel = fsm_utils.c_channel # see fsm_utlis.py for available predefined test channels
    fo=trellis.fsm(fname_out) # get the outer FSM specification from a file
    fi=trellis.fsm(len(modulation[1]),len(channel)) # generate the FSM automatically
    if fo.O() != fi.I():
        sys.stderr.write ('Incompatible cardinality between outer and inner FSM.\n')
        sys.exit (1)
    bitspersymbol = int(round(math.log(fo.I())/math.log(2))) # bits per FSM input symbol
    K=Kb/bitspersymbol # packet size in trellis steps
    interleaver=trellis.interleaver(K,666) # construct a random interleaver
    tot_channel = fsm_utils.make_isi_lookup(modulation,channel,True) # generate the lookup table (normalize energy to 1)
    dimensionality = tot_channel[0]
    tot_constellation = tot_channel[1]
    if len(tot_constellation)/dimensionality != fi.O():
        sys.stderr.write ('Incompatible FSM output cardinality and lookup table size.\n')
        sys.exit (1)
    N0=pow(10.0,-esn0_db/10.0); # noise variance
    IT = 3 # number of turbo iterations
    tot_s=0 # total number of transmitted shorts
    terr_s=0 # total number of shorts in error
    terr_p=0 # total number of packets in error
    for i in range(rep):
        (s,e)=run_test(fo,fi,interleaver,Kb,bitspersymbol,K,channel,modulation,dimensionality,tot_constellation,1,N0,IT,-long(666+i)) # run experiment with different seed to get different noise realizations
        tot_s=tot_s+s
        terr_s=terr_s+e
        terr_p=terr_p+(terr_s!=0)
        if ((i+1)%10==0) : # display progress
            print i+1,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
    # estimate of the (short or bit) error rate
    print rep,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
if __name__ == '__main__':
    main (sys.argv[1:])
 | 
	gpl-2.0 | -7,890,500,674,692,229,000 | 35.052632 | 206 | 0.644708 | false | 
| 
	astagi/django-cms | 
	cms/test_utils/project/placeholderapp/migrations_django/0001_initial.py | 
	66 | 
	4526 | 
	# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cms.models.fields
import cms.test_utils.project.placeholderapp.models
class Migration(migrations.Migration):
    dependencies = [
        ('cms', '0002_auto_20140816_1918'),
    ]
    operations = [
        migrations.CreateModel(
            name='DynamicPlaceholderSlotExample',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
                ('char_1', models.CharField(max_length=255, verbose_name='char_1')),
                ('char_2', models.CharField(max_length=255, verbose_name='char_2')),
                ('placeholder_1', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname=cms.test_utils.project.placeholderapp.models.dynamic_placeholder_1, related_name='dynamic_pl_1', editable=False)),
                ('placeholder_2', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname=cms.test_utils.project.placeholderapp.models.dynamic_placeholder_2, related_name='dynamic_pl_2', editable=False)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Example1',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
                ('char_1', models.CharField(max_length=255, verbose_name='char_1')),
                ('char_2', models.CharField(max_length=255, verbose_name='char_2')),
                ('char_3', models.CharField(max_length=255, verbose_name='char_3')),
                ('char_4', models.CharField(max_length=255, verbose_name='char_4')),
                ('date_field', models.DateField(null=True)),
                ('placeholder', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder', editable=False)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MultilingualExample1',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
                ('placeholder_1', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder_1', editable=False)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MultilingualExample1Translation',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
                ('char_1', models.CharField(max_length=255, verbose_name='char_1')),
                ('char_2', models.CharField(max_length=255, verbose_name='char_2')),
                ('language_code', models.CharField(db_index=True, max_length=15)),
                ('master', models.ForeignKey(null=True, to='placeholderapp.MultilingualExample1', related_name='translations', editable=False)),
            ],
            options={
                'db_table': 'placeholderapp_multilingualexample1_translation',
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='multilingualexample1translation',
            unique_together=set([('language_code', 'master')]),
        ),
        migrations.CreateModel(
            name='TwoPlaceholderExample',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
                ('char_1', models.CharField(max_length=255, verbose_name='char_1')),
                ('char_2', models.CharField(max_length=255, verbose_name='char_2')),
                ('char_3', models.CharField(max_length=255, verbose_name='char_3')),
                ('char_4', models.CharField(max_length=255, verbose_name='char_4')),
                ('placeholder_1', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder_1', related_name='p1', editable=False)),
                ('placeholder_2', cms.models.fields.PlaceholderField(null=True, to='cms.Placeholder', slotname='placeholder_2', related_name='p2', editable=False)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
 | 
	bsd-3-clause | -2,308,999,552,748,953,000 | 50.431818 | 225 | 0.58418 | false | 
| 
	lhupfeldt/multiconf | 
	test/invalid_values_test.py | 
	1 | 
	19200 | 
	# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import sys
import os.path
from pytest import raises
from multiconf import mc_config, ConfigItem, ConfigException, MC_REQUIRED
from multiconf.envs import EnvFactory
from .utils.utils import config_error, next_line_num, replace_ids, lines_in, start_file_line
from .utils.messages import already_printed_msg, config_error_mc_required_expected, mc_required_expected
from .utils.messages import config_error_never_received_value_expected
from .utils.tstclasses import ItemWithAA
from .utils.invalid_values_classes import  McRequiredInInitL1, McRequiredInInitL3
minor_version = sys.version_info[1]
_utils = os.path.join(os.path.dirname(__file__), 'utils')
ef1_prod_pp = EnvFactory()
pp1 = ef1_prod_pp.Env('pp')
prod1 = ef1_prod_pp.Env('prod')
def ce(line_num, *lines):
    return config_error(__file__, line_num, *lines)
_attribute_mc_required_expected = mc_required_expected.format(attr='aa', env=prod1)
_mc_required_one_error_expected_ex = """There was 1 error when defining item: {
    "__class__": "ItemWithAA #as: 'ItemWithAA', id: 0000, not-frozen",
    "env": {
        "__class__": "Env",
        "name": "%(env_name)s"
    },
    "aa": "MC_REQUIRED"
}""" + already_printed_msg
def test_attribute_mc_required_env(capsys):
    errorline = [None]
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config(root):
            with ItemWithAA() as cr:
                errorline[0] = next_line_num()
                cr.setattr('aa', prod=MC_REQUIRED, pp="hello")
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        start_file_line(__file__, errorline[0]),
        config_error_never_received_value_expected.format(env=prod1),
        start_file_line(__file__, errorline[0]),
        '^ConfigError: ' + _attribute_mc_required_expected,
    )
    assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='prod')
def test_attribute_mc_required_mc_force_env(capsys):
    errorline = [None]
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config(root):
            with ItemWithAA() as cr:
                errorline[0] = next_line_num()
                cr.setattr('aa', default=MC_REQUIRED, mc_force=True)
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        start_file_line(__file__, errorline[0]),
        config_error_mc_required_expected.format(attr='aa', env=pp1),
    )
    assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='pp')
def test_attribute_mc_required_default(capsys):
    errorline = [None]
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config(root):
            with ItemWithAA() as cr:
                errorline[0] = next_line_num()
                cr.setattr('aa', default=MC_REQUIRED, pp="hello")
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        start_file_line(__file__, errorline[0]),
        config_error_never_received_value_expected.format(env=prod1),
        start_file_line(__file__, errorline[0]),
        '^ConfigError: ' + _attribute_mc_required_expected,
    )
    assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='prod')
def test_attribute_mc_required_default_resolved_with_default_value_in_mc_init(capsys):
    """A MC_REQUIRED default left unset for an env is resolved by a plain
    attribute assignment in mc_init."""
    class ItemWithAAMcInitResolve(ItemWithAA):
        def mc_init(self):
            super().mc_init()
            self.aa = 'Hi'

    @mc_config(ef1_prod_pp, load_now=True)
    def config(_):
        with ItemWithAAMcInitResolve() as item:
            item.setattr('aa', default=MC_REQUIRED, pp="hello")

    # 'pp' got an explicit value; 'prod' falls back to the mc_init assignment.
    assert config(pp1).ItemWithAAMcInitResolve.aa == 'hello'
    assert config(prod1).ItemWithAAMcInitResolve.aa == 'Hi'
def test_attribute_mc_required_default_resolved_with_default_env_specific_value_in_mc_init(capsys):
    """An env-specific setattr in mc_init resolves MC_REQUIRED for that env only."""
    class ItemWithAAMcInitResolve(ItemWithAA):
        def mc_init(self):
            super().mc_init()
            self.setattr('aa', prod='Hi')

    @mc_config(ef1_prod_pp, load_now=True)
    def config(_):
        with ItemWithAAMcInitResolve() as item:
            item.setattr('aa', default=MC_REQUIRED, pp="hello")

    # 'pp' keeps its explicit value; 'prod' is filled in by mc_init.
    assert config(pp1).ItemWithAAMcInitResolve.aa == 'hello'
    assert config(prod1).ItemWithAAMcInitResolve.aa == 'Hi'
def test_attribute_mc_required_init(capsys):
    """An MC_REQUIRED __init__ value only partially overridden by setattr
    (env 'prod' never receives a value) is reported as an error.

    Fix: removed two leftover debug ``print`` statements that polluted the
    test output.
    """
    errorline = [None]
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config(root):
            with ItemWithAA(aa=MC_REQUIRED) as ci:
                # Record the line number of the offending setattr below.
                errorline[0] = next_line_num()
                ci.setattr('aa', pp="hello")
    _sout, serr = capsys.readouterr()
    assert serr == ce(errorline[0], _attribute_mc_required_expected)
    assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='prod')
def test_attribute_mc_required_in_with(capsys):
    """Explicitly assigning MC_REQUIRED to env 'pp' inside the with block
    means 'pp' never receives a value; this must be reported."""
    errorline = [None]
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config(root):
            with ItemWithAA() as cr:
                # Record the line number of the offending setattr below.
                errorline[0] = next_line_num()
                cr.setattr('aa', prod="hi", pp=MC_REQUIRED)
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        start_file_line(__file__, errorline[0]),
        config_error_never_received_value_expected.format(env=pp1),
        start_file_line(__file__, errorline[0]),
        '^ConfigError: ' + mc_required_expected.format(attr='aa', env=pp1),
    )
    assert replace_ids(str(exinfo.value), False) == _mc_required_one_error_expected_ex % dict(env_name='pp')
def test_attribute_mc_required_in_with_default_all_overridden():
    """default=MC_REQUIRED is moot when every env gets an explicit value."""
    @mc_config(ef1_prod_pp, load_now=True)
    def config(_):
        with ItemWithAA() as item:
            # TODO: This should actually not be allowed, it does not make sense!
            item.setattr('aa', default=MC_REQUIRED, pp="hello", prod="hi")

    assert config(prod1).ItemWithAA.aa == "hi"
def test_attribute_mc_required_init_args_all_overridden():
    """An MC_REQUIRED __init__ default is satisfied either by passing the
    argument to the constructor or by assigning inside the with block."""
    class Requires(ConfigItem):
        def __init__(self, aa=MC_REQUIRED):
            super().__init__()
            self.aa = aa

    # Satisfied directly through the constructor argument.
    @mc_config(ef1_prod_pp, load_now=True)
    def config1(_):
        with ConfigItem():
            Requires(aa=3)

    assert config1(prod1).ConfigItem.Requires.aa == 3

    # Satisfied by plain assignment inside the with block.
    @mc_config(ef1_prod_pp, load_now=True)
    def config2(_):
        with ConfigItem():
            with Requires() as req:
                req.aa = 3

    assert config2(prod1).ConfigItem.Requires.aa == 3
def test_attribute_mc_required_args_all_overridden_in_mc_init():
    """mc_init may overwrite an MC_REQUIRED value received through __init__."""
    class Requires(ConfigItem):
        def __init__(self, aa=MC_REQUIRED):
            super().__init__()
            self.aa = aa

        def mc_init(self):
            self.aa = 7

    @mc_config(ef1_prod_pp, load_now=True)
    def config(_):
        Requires()

    assert config(prod1).Requires.aa == 7
def test_attribute_mc_required_args_partial_set_in_init_overridden_in_mc_init():
    """Partial (env-specific) setattr in __init__ is allowed; mc_init fills
    the remaining envs without clobbering explicit env values."""
    class Requires(ConfigItem):
        def __init__(self, aa=MC_REQUIRED):
            super().__init__()
            # Partial assignment is allowed in __init__.
            self.setattr('aa', prod=aa)
            self.setattr('b', default=MC_REQUIRED, prod=2)

        def mc_init(self):
            self.aa = 7
            self.b = 7

    @mc_config(ef1_prod_pp, load_now=True)
    def config(_):
        Requires()

    prod_cfg = config(prod1)
    assert prod_cfg.Requires.aa == 7
    assert prod_cfg.Requires.b == 2  # the explicit prod value from __init__ wins
    pp_cfg = config(pp1)
    assert pp_cfg.Requires.aa == 7
    assert pp_cfg.Requires.b == 7
def test_attribute_mc_required_args_partial_set_in_init_overridden_in_with():
    """Values left MC_REQUIRED by __init__ can be completed in the with block."""
    class Requires(ConfigItem):
        def __init__(self, aa=MC_REQUIRED):
            super().__init__()
            # Partial assignment is allowed in __init__.
            self.setattr('aa', prod=aa)
            self.setattr('b', default=MC_REQUIRED, prod=2)

    @mc_config(ef1_prod_pp, load_now=True)
    def config(_):
        with Requires() as req:
            req.aa = 8
            req.setattr('b', pp=8)

    # 'b' keeps the explicit prod value from __init__; 'pp' comes from the with block.
    for env, expected_b in ((prod1, 2), (pp1, 8)):
        cfg = config(env)
        assert cfg.Requires.aa == 8
        assert cfg.Requires.b == expected_b
def test_attribute_mc_required_args_set_in_init_overridden_in_with():
    """A plain assignment in the with block replaces an MC_REQUIRED value
    stored by __init__ for all envs."""
    class Requires(ConfigItem):
        def __init__(self, aa=MC_REQUIRED):
            super().__init__()
            self.aa = aa

    @mc_config(ef1_prod_pp, load_now=True)
    def config(_):
        with Requires() as req:
            req.aa = 7

    for env in (prod1, pp1):
        assert config(env).Requires.aa == 7
_attribute_mc_required_requires_expected_ex = """There was 1 error when defining item: {
    "__class__": "Requires #as: 'Requires', id: 0000, not-frozen",
    "env": {
        "__class__": "Env",
        "name": "pp"
    },
    "aa": "MC_REQUIRED"
}""" + already_printed_msg
def test_attribute_mc_required_init_args_missing_env_value(capsys):
    """An MC_REQUIRED __init__ arg overridden for 'prod' only leaves 'pp'
    without a value; this must be reported.

    Fix: removed a leftover debug ``print(_sout)`` statement.
    """
    errorline = [None]
    class Requires(ConfigItem):
        def __init__(self, aa=MC_REQUIRED):
            super().__init__()
            self.aa = aa
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config(root):
            with Requires() as rq:
                # Record the line number of the offending setattr below.
                errorline[0] = next_line_num()
                rq.setattr('aa', prod='hi')
    _sout, serr = capsys.readouterr()
    assert serr == ce(errorline[0], mc_required_expected.format(attr='aa', env=pp1))
    assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_requires_expected_ex
_attribute_mc_required_required_init_arg_missing_with_expected_ex = """There was 1 error when defining item: {{
    "__class__": "{0} #as: '{0}', id: 0000, not-frozen",
    "env": {{
        "__class__": "Env",
        "name": "pp"
    }},
    "aa": "MC_REQUIRED"
}}""" + already_printed_msg
def test_attribute_mc_required_init_args_missing_with(capsys):
    """MC_REQUIRED __init__ args never resolved: check the reported source
    location for items created with and without a `with` statement, at one
    (L1) and three (L3) levels of class nesting."""
    errorline = [None]
    # If the error occurs on the last object, and that is not under a with statement, then the line will be the @mc_config
    with raises(ConfigException) as exinfo:
        # Decorator line offset differs between Python minor versions.
        errorline[0] = next_line_num() + (1 if minor_version > 7 else 0)
        @mc_config(ef1_prod_pp, load_now=True)
        def config(root):
            McRequiredInInitL1()
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        start_file_line(__file__, errorline[0]),
        config_error_never_received_value_expected.format(env=pp1),
        '^File "{}/invalid_values_classes.py", line 8'.format(_utils),
        mc_required_expected.format(attr='aa', env=pp1),
    )
    exp = _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL1')
    got = replace_ids(str(exinfo.value), False)
    assert got == exp
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config0(root):
            with McRequiredInInitL1():
                errorline[0] = next_line_num()
                pass
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        start_file_line(__file__, errorline[0]),
        config_error_never_received_value_expected.format(env=pp1),
        '^File "{}/invalid_values_classes.py", line 8'.format(_utils),
        mc_required_expected.format(attr='aa', env=pp1),
    )
    assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL1')
    # If the error occurs on the last object, and that is not under a with statement, then the line will be the @mc_config
    with raises(ConfigException) as exinfo:
        errorline[0] = next_line_num() + (1 if minor_version > 7 else 0)
        @mc_config(ef1_prod_pp, load_now=True)
        def config1(root):
            McRequiredInInitL3()
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        start_file_line(__file__, errorline[0]),
        config_error_never_received_value_expected.format(env=pp1),
        '^File "{}/invalid_values_classes.py", line 8'.format(_utils),
        mc_required_expected.format(attr='aa', env=pp1),
    )
    assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL3')
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config2(root):
            with McRequiredInInitL3():
                errorline[0] = next_line_num()
                pass
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        start_file_line(__file__, errorline[0]),
        config_error_never_received_value_expected.format(env=pp1),
        '^File "{}/invalid_values_classes.py", line 8'.format(_utils),
        mc_required_expected.format(attr='aa', env=pp1),
    )
    assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL3')
def test_attribute_mc_required_init_args_missing_previous_item(capsys):
    """A missing MC_REQUIRED value on an item is still reported when a
    sibling item is defined after it."""
    errorline = [None]
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config(root):
            errorline[0] = next_line_num()
            McRequiredInInitL1()
            McRequiredInInitL3()
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        config_error_never_received_value_expected.format(env=pp1),
        '^File "{}/invalid_values_classes.py", line 8'.format(_utils),
        mc_required_expected.format(attr='aa', env=pp1),
    )
    assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_required_init_arg_missing_with_expected_ex.format('McRequiredInInitL1')
def test_attribute_mc_required_init_assign_all_overridden():
    """MC_REQUIRED from __init__ can be satisfied via the constructor arg or
    via assignment in the with block.

    Fix: the second config function used to be named ``config`` as well,
    shadowing the first; renamed to config1/config2 to match the convention in
    test_attribute_mc_required_init_args_all_overridden.
    """
    class Requires(ConfigItem):
        def __init__(self, aa=MC_REQUIRED):
            super().__init__()
            self.aa = aa

    # Satisfied through the constructor argument.
    @mc_config(ef1_prod_pp, load_now=True)
    def config1(root):
        Requires(aa=3)

    cr = config1(prod1)
    assert cr.Requires.aa == 3

    # Satisfied by assignment inside the with block.
    @mc_config(ef1_prod_pp, load_now=True)
    def config2(_):
        with Requires() as rq:
            rq.aa = 3

    cr = config2(prod1)
    assert cr.Requires.aa == 3
_attribute_mc_required_env_in_init_expected_ex = """There were %(num_errors)s errors when defining item: {
    "__class__": "MyRoot #as: 'MyRoot', id: 0000, not-frozen",
    "env": {
        "__class__": "Env",
        "name": "pp"
    },
    "aa": "MC_REQUIRED",
    "bb": "MC_REQUIRED"
}""" + already_printed_msg
def test_attribute_setattr_mc_required_force_in_init(capsys):
    """mc_force=True setattr of MC_REQUIRED in __init__ leaves 'aa' and 'bb'
    unresolved; both must be reported for env 'pp'."""
    errorline = [None]
    class MyRoot(ConfigItem):
        def __init__(self):
            super().__init__()
            # Record the line of the first offending setattr below.
            errorline[0] = next_line_num()
            self.setattr('aa', default=MC_REQUIRED, mc_force=True)
            self.setattr('bb', default=MC_REQUIRED, mc_force=True)
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config(_):
            MyRoot()
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        start_file_line(__file__, errorline[0]),
        config_error_mc_required_expected.format(attr='aa', env=pp1),
        config_error_mc_required_expected.format(attr='bb', env=pp1),
    )
    assert replace_ids(str(exinfo.value), False) == _attribute_mc_required_env_in_init_expected_ex % dict(num_errors=2)
def test_multiple_attributes_mc_required_init_not_set(capsys):
    """Three attributes set to MC_REQUIRED in __init__ and never given values
    must all be reported."""
    errorline = [None]
    class ItemWithAAABBCC(ConfigItem):
        def __init__(self):
            super().__init__()
            self.aa = MC_REQUIRED
            self.bb = MC_REQUIRED
            self.cc = MC_REQUIRED
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config(_):
            with ConfigItem() as cr:
                # Record the line where the failing item is instantiated.
                errorline[0] = next_line_num()
                ItemWithAAABBCC()
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        start_file_line(__file__, errorline[0]),
        config_error_mc_required_expected.format(attr='aa', env=pp1),
        config_error_mc_required_expected.format(attr='bb', env=pp1),
        config_error_mc_required_expected.format(attr='cc', env=pp1),
    )
def test_multiple_attributes_mc_required_mc_init_not_set(capsys):
    """Attributes re-assigned MC_REQUIRED in mc_init ('aa' and 'cc') are
    reported at their mc_init lines; 'bb' gets a 'pp' value and passes."""
    errorlines = [None, None]
    class ItemWithAAABBCC(ConfigItem):
        def __init__(self):
            super().__init__()
            self.aa = MC_REQUIRED
            self.bb = MC_REQUIRED
            self.cc = MC_REQUIRED
        def mc_init(self):
            # NOTE(review): this calls super().__init__() from mc_init() -
            # it looks like super().mc_init() was intended; confirm intent.
            super().__init__()
            errorlines[0] = next_line_num()
            self.setattr('aa', default=MC_REQUIRED)
            self.setattr('bb', default=MC_REQUIRED, pp='Hello')
            errorlines[1] = next_line_num()
            self.cc = MC_REQUIRED
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config(_):
            with ConfigItem() as cr:
                ItemWithAAABBCC()
    _sout, serr = capsys.readouterr()
    assert lines_in(
        serr,
        start_file_line(__file__, errorlines[0]),
        config_error_mc_required_expected.format(attr='aa', env=pp1),
        start_file_line(__file__, errorlines[1]),
        config_error_mc_required_expected.format(attr='cc', env=pp1),
    )
_multiple_attributes_mc_required_env_expected_ex = """There %(ww)s %(num_errors)s %(err)s when defining item: {
    "__class__": "MyRoot #as: 'MyRoot', id: 0000, not-frozen",
    "env": {
        "__class__": "Env",
        "name": "pp"
    },
    "aa": "hello",
    "bb": "MC_REQUIRED"
}""" + already_printed_msg
def test_multiple_attributes_mc_required_env(capsys):
    """Env-specific MC_REQUIRED assignments: 'bb' never receives a 'pp'
    value and is reported; the error line is one below the recorded one."""
    errorline = [None]
    class MyRoot(ConfigItem):
        def __init__(self):
            super().__init__()
            self.aa = MC_REQUIRED
            self.bb = MC_REQUIRED
    with raises(ConfigException) as exinfo:
        @mc_config(ef1_prod_pp, load_now=True)
        def config(_):
            with MyRoot() as cr:
                # Records the 'aa' setattr line; the 'bb' setattr is +1.
                errorline[0] = next_line_num()
                cr.setattr('aa', prod=MC_REQUIRED, pp="hello")
                cr.setattr('bb', prod=1, pp=MC_REQUIRED)
    _sout, serr = capsys.readouterr()
    #assert ce(errorline[0], mc_required_expected.format(attr='aa', env=prod1)) in serr
    assert ce(errorline[0] + 1, mc_required_expected.format(attr='bb', env=pp1)) in serr
    assert replace_ids(str(exinfo.value), False) == _multiple_attributes_mc_required_env_expected_ex % dict(ww='was', num_errors=1, err='error')
 | 
	bsd-3-clause | -5,479,040,787,900,449,000 | 32.217993 | 146 | 0.60651 | false | 
| 
	mshafiq9/django | 
	tests/gis_tests/geos_tests/test_mutable_list.py | 
	173 | 
	14846 | 
	# Copyright (c) 2008-2009 Aryeh Leib Taurog, http://www.aryehleib.com
# All rights reserved.
#
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
import unittest
from django.contrib.gis.geos.mutable_list import ListMixin
from django.utils import six
class UserListA(ListMixin):
    """Minimal ListMixin subclass backed by an immutable tuple, so the mixin
    must go through _set_list for every mutation."""

    _mytype = tuple

    def __init__(self, i_list, *args, **kwargs):
        self._list = self._mytype(i_list)
        super(UserListA, self).__init__(*args, **kwargs)

    def __len__(self):
        return len(self._list)

    def __str__(self):
        return str(self._list)

    def __repr__(self):
        return repr(self._list)

    def _set_list(self, length, items):
        # Deliberately builds via the `length` parameter rather than just
        # doing self._list = self._mytype(items), so `length` is exercised.
        filled = ['x'] * length
        for pos, item in enumerate(items):
            filled[pos] = item
        self._list = self._mytype(filled)

    def _get_single_external(self, index):
        return self._list[index]
class UserListB(UserListA):
    """Variant backed by a mutable list, so single-item writes can bypass
    _set_list via _set_single."""
    _mytype = list

    def _set_single(self, index, value):
        self._list[index] = value
def nextRange(length):
    """Return a range of `length` consecutive ints starting at a fresh
    100-aligned base, so successive calls never overlap."""
    nextRange.start += 100
    base = nextRange.start
    return range(base, base + length)
nextRange.start = 0
class ListMixinTest(unittest.TestCase):
    """
    Tests base class ListMixin by comparing a list clone which is
    a ListMixin subclass with a real Python list.

    Fix: test06_list_methods asserted against ``ul.pop(i)`` using a stale
    loop variable where ``ul.pop()`` (pop last) was intended; it only passed
    because the loop's final value of ``i`` happened to equal the last index.
    """
    limit = 3
    listType = UserListA

    def lists_of_len(self, length=None):
        """Return a (plain list, ListMixin clone) pair of the given length."""
        if length is None:
            length = self.limit
        pl = list(range(length))
        return pl, self.listType(pl)

    def limits_plus(self, b):
        """Indices from -limit-b up to limit+b-1."""
        return range(-self.limit - b, self.limit + b)

    def step_range(self):
        """All usable non-zero slice steps."""
        return list(range(-1 - self.limit, 0)) + list(range(1, 1 + self.limit))

    def test01_getslice(self):
        'Slice retrieval'
        pl, ul = self.lists_of_len()
        for i in self.limits_plus(1):
            self.assertEqual(pl[i:], ul[i:], 'slice [%d:]' % (i))
            self.assertEqual(pl[:i], ul[:i], 'slice [:%d]' % (i))
            for j in self.limits_plus(1):
                self.assertEqual(pl[i:j], ul[i:j], 'slice [%d:%d]' % (i, j))
                for k in self.step_range():
                    self.assertEqual(pl[i:j:k], ul[i:j:k], 'slice [%d:%d:%d]' % (i, j, k))
            for k in self.step_range():
                self.assertEqual(pl[i::k], ul[i::k], 'slice [%d::%d]' % (i, k))
                self.assertEqual(pl[:i:k], ul[:i:k], 'slice [:%d:%d]' % (i, k))
        for k in self.step_range():
            self.assertEqual(pl[::k], ul[::k], 'slice [::%d]' % (k))

    def test02_setslice(self):
        'Slice assignment'
        def setfcn(x, i, j, k, L):
            x[i:j:k] = range(L)
        pl, ul = self.lists_of_len()
        for slen in range(self.limit + 1):
            ssl = nextRange(slen)
            ul[:] = ssl
            pl[:] = ssl
            self.assertEqual(pl, ul[:], 'set slice [:]')
            for i in self.limits_plus(1):
                ssl = nextRange(slen)
                ul[i:] = ssl
                pl[i:] = ssl
                self.assertEqual(pl, ul[:], 'set slice [%d:]' % (i))
                ssl = nextRange(slen)
                ul[:i] = ssl
                pl[:i] = ssl
                self.assertEqual(pl, ul[:], 'set slice [:%d]' % (i))
                for j in self.limits_plus(1):
                    ssl = nextRange(slen)
                    ul[i:j] = ssl
                    pl[i:j] = ssl
                    self.assertEqual(pl, ul[:], 'set slice [%d:%d]' % (i, j))
                    for k in self.step_range():
                        ssl = nextRange(len(ul[i:j:k]))
                        ul[i:j:k] = ssl
                        pl[i:j:k] = ssl
                        self.assertEqual(pl, ul[:], 'set slice [%d:%d:%d]' % (i, j, k))
                        sliceLen = len(ul[i:j:k])
                        # Extended slice assignment must match the slice length.
                        self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen + 1)
                        if sliceLen > 2:
                            self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen - 1)
                for k in self.step_range():
                    ssl = nextRange(len(ul[i::k]))
                    ul[i::k] = ssl
                    pl[i::k] = ssl
                    self.assertEqual(pl, ul[:], 'set slice [%d::%d]' % (i, k))
                    ssl = nextRange(len(ul[:i:k]))
                    ul[:i:k] = ssl
                    pl[:i:k] = ssl
                    self.assertEqual(pl, ul[:], 'set slice [:%d:%d]' % (i, k))
            for k in self.step_range():
                ssl = nextRange(len(ul[::k]))
                ul[::k] = ssl
                pl[::k] = ssl
                self.assertEqual(pl, ul[:], 'set slice [::%d]' % (k))

    def test03_delslice(self):
        'Delete slice'
        for Len in range(self.limit):
            pl, ul = self.lists_of_len(Len)
            del pl[:]
            del ul[:]
            self.assertEqual(pl[:], ul[:], 'del slice [:]')
            for i in range(-Len - 1, Len + 1):
                pl, ul = self.lists_of_len(Len)
                del pl[i:]
                del ul[i:]
                self.assertEqual(pl[:], ul[:], 'del slice [%d:]' % (i))
                pl, ul = self.lists_of_len(Len)
                del pl[:i]
                del ul[:i]
                self.assertEqual(pl[:], ul[:], 'del slice [:%d]' % (i))
                for j in range(-Len - 1, Len + 1):
                    pl, ul = self.lists_of_len(Len)
                    del pl[i:j]
                    del ul[i:j]
                    self.assertEqual(pl[:], ul[:], 'del slice [%d:%d]' % (i, j))
                    for k in list(range(-Len - 1, 0)) + list(range(1, Len)):
                        pl, ul = self.lists_of_len(Len)
                        del pl[i:j:k]
                        del ul[i:j:k]
                        self.assertEqual(pl[:], ul[:], 'del slice [%d:%d:%d]' % (i, j, k))
                for k in list(range(-Len - 1, 0)) + list(range(1, Len)):
                    pl, ul = self.lists_of_len(Len)
                    del pl[:i:k]
                    del ul[:i:k]
                    self.assertEqual(pl[:], ul[:], 'del slice [:%d:%d]' % (i, k))
                    pl, ul = self.lists_of_len(Len)
                    del pl[i::k]
                    del ul[i::k]
                    self.assertEqual(pl[:], ul[:], 'del slice [%d::%d]' % (i, k))
            for k in list(range(-Len - 1, 0)) + list(range(1, Len)):
                pl, ul = self.lists_of_len(Len)
                del pl[::k]
                del ul[::k]
                self.assertEqual(pl[:], ul[:], 'del slice [::%d]' % (k))

    def test04_get_set_del_single(self):
        'Get/set/delete single item'
        pl, ul = self.lists_of_len()
        for i in self.limits_plus(0):
            self.assertEqual(pl[i], ul[i], 'get single item [%d]' % i)
        for i in self.limits_plus(0):
            pl, ul = self.lists_of_len()
            pl[i] = 100
            ul[i] = 100
            self.assertEqual(pl[:], ul[:], 'set single item [%d]' % i)
        for i in self.limits_plus(0):
            pl, ul = self.lists_of_len()
            del pl[i]
            del ul[i]
            self.assertEqual(pl[:], ul[:], 'del single item [%d]' % i)

    def test05_out_of_range_exceptions(self):
        'Out of range exceptions'
        def setfcn(x, i):
            x[i] = 20
        def getfcn(x, i):
            return x[i]
        def delfcn(x, i):
            del x[i]
        pl, ul = self.lists_of_len()
        for i in (-1 - self.limit, self.limit):
            self.assertRaises(IndexError, setfcn, ul, i)  # 'set index %d' % i)
            self.assertRaises(IndexError, getfcn, ul, i)  # 'get index %d' % i)
            self.assertRaises(IndexError, delfcn, ul, i)  # 'del index %d' % i)

    def test06_list_methods(self):
        'List methods'
        pl, ul = self.lists_of_len()
        pl.append(40)
        ul.append(40)
        self.assertEqual(pl[:], ul[:], 'append')
        pl.extend(range(50, 55))
        ul.extend(range(50, 55))
        self.assertEqual(pl[:], ul[:], 'extend')
        pl.reverse()
        ul.reverse()
        self.assertEqual(pl[:], ul[:], 'reverse')
        for i in self.limits_plus(1):
            pl, ul = self.lists_of_len()
            pl.insert(i, 50)
            ul.insert(i, 50)
            self.assertEqual(pl[:], ul[:], 'insert at %d' % i)
        for i in self.limits_plus(0):
            pl, ul = self.lists_of_len()
            self.assertEqual(pl.pop(i), ul.pop(i), 'popped value at %d' % i)
            self.assertEqual(pl[:], ul[:], 'after pop at %d' % i)
        pl, ul = self.lists_of_len()
        # Fixed: was ul.pop(i), which reused the stale loop variable above.
        self.assertEqual(pl.pop(), ul.pop(), 'popped value')
        self.assertEqual(pl[:], ul[:], 'after pop')
        pl, ul = self.lists_of_len()
        def popfcn(x, i):
            x.pop(i)
        self.assertRaises(IndexError, popfcn, ul, self.limit)
        self.assertRaises(IndexError, popfcn, ul, -1 - self.limit)
        pl, ul = self.lists_of_len()
        for val in range(self.limit):
            self.assertEqual(pl.index(val), ul.index(val), 'index of %d' % val)
        for val in self.limits_plus(2):
            self.assertEqual(pl.count(val), ul.count(val), 'count %d' % val)
        for val in range(self.limit):
            pl, ul = self.lists_of_len()
            pl.remove(val)
            ul.remove(val)
            self.assertEqual(pl[:], ul[:], 'after remove val %d' % val)
        def indexfcn(x, v):
            return x.index(v)
        def removefcn(x, v):
            return x.remove(v)
        self.assertRaises(ValueError, indexfcn, ul, 40)
        self.assertRaises(ValueError, removefcn, ul, 40)

    def test07_allowed_types(self):
        'Type-restricted list'
        pl, ul = self.lists_of_len()
        ul._allowed = six.integer_types
        ul[1] = 50
        ul[:2] = [60, 70, 80]
        def setfcn(x, i, v):
            x[i] = v
        self.assertRaises(TypeError, setfcn, ul, 2, 'hello')
        self.assertRaises(TypeError, setfcn, ul, slice(0, 3, 2), ('hello', 'goodbye'))

    def test08_min_length(self):
        'Length limits'
        pl, ul = self.lists_of_len()
        ul._minlength = 1
        def delfcn(x, i):
            del x[:i]
        def setfcn(x, i):
            x[:i] = []
        for i in range(self.limit - ul._minlength + 1, self.limit + 1):
            self.assertRaises(ValueError, delfcn, ul, i)
            self.assertRaises(ValueError, setfcn, ul, i)
        del ul[:ul._minlength]
        ul._maxlength = 4
        for i in range(0, ul._maxlength - len(ul)):
            ul.append(i)
        self.assertRaises(ValueError, ul.append, 10)

    def test09_iterable_check(self):
        'Error on assigning non-iterable to slice'
        pl, ul = self.lists_of_len(self.limit + 1)
        def setfcn(x, i, v):
            x[i] = v
        self.assertRaises(TypeError, setfcn, ul, slice(0, 3, 2), 2)

    def test10_checkindex(self):
        'Index check'
        pl, ul = self.lists_of_len()
        for i in self.limits_plus(0):
            if i < 0:
                self.assertEqual(ul._checkindex(i), i + self.limit, '_checkindex(neg index)')
            else:
                self.assertEqual(ul._checkindex(i), i, '_checkindex(pos index)')
        for i in (-self.limit - 1, self.limit):
            self.assertRaises(IndexError, ul._checkindex, i)

    def test_11_sorting(self):
        'Sorting'
        pl, ul = self.lists_of_len()
        pl.insert(0, pl.pop())
        ul.insert(0, ul.pop())
        pl.sort()
        ul.sort()
        self.assertEqual(pl[:], ul[:], 'sort')
        mid = pl[len(pl) // 2]
        pl.sort(key=lambda x: (mid - x) ** 2)
        ul.sort(key=lambda x: (mid - x) ** 2)
        self.assertEqual(pl[:], ul[:], 'sort w/ key')
        pl.insert(0, pl.pop())
        ul.insert(0, ul.pop())
        pl.sort(reverse=True)
        ul.sort(reverse=True)
        self.assertEqual(pl[:], ul[:], 'sort w/ reverse')
        mid = pl[len(pl) // 2]
        pl.sort(key=lambda x: (mid - x) ** 2)
        ul.sort(key=lambda x: (mid - x) ** 2)
        self.assertEqual(pl[:], ul[:], 'sort w/ key')

    def test_12_arithmetic(self):
        'Arithmetic'
        pl, ul = self.lists_of_len()
        al = list(range(10, 14))
        self.assertEqual(list(pl + al), list(ul + al), 'add')
        self.assertEqual(type(ul), type(ul + al), 'type of add result')
        self.assertEqual(list(al + pl), list(al + ul), 'radd')
        self.assertEqual(type(al), type(al + ul), 'type of radd result')
        objid = id(ul)
        pl += al
        ul += al
        self.assertEqual(pl[:], ul[:], 'in-place add')
        self.assertEqual(objid, id(ul), 'in-place add id')
        for n in (-1, 0, 1, 3):
            pl, ul = self.lists_of_len()
            self.assertEqual(list(pl * n), list(ul * n), 'mul by %d' % n)
            self.assertEqual(type(ul), type(ul * n), 'type of mul by %d result' % n)
            self.assertEqual(list(n * pl), list(n * ul), 'rmul by %d' % n)
            self.assertEqual(type(ul), type(n * ul), 'type of rmul by %d result' % n)
            objid = id(ul)
            pl *= n
            ul *= n
            self.assertEqual(pl[:], ul[:], 'in-place mul by %d' % n)
            self.assertEqual(objid, id(ul), 'in-place mul by %d id' % n)
        pl, ul = self.lists_of_len()
        self.assertEqual(pl, ul, 'cmp for equal')
        self.assertNotEqual(ul, pl + [2], 'cmp for not equal')
        self.assertGreaterEqual(pl, ul, 'cmp for gte self')
        self.assertLessEqual(pl, ul, 'cmp for lte self')
        self.assertGreaterEqual(ul, pl, 'cmp for self gte')
        self.assertLessEqual(ul, pl, 'cmp for self lte')
        self.assertGreater(pl + [5], ul, 'cmp')
        self.assertGreaterEqual(pl + [5], ul, 'cmp')
        self.assertLess(pl, ul + [2], 'cmp')
        self.assertLessEqual(pl, ul + [2], 'cmp')
        self.assertGreater(ul + [5], pl, 'cmp')
        self.assertGreaterEqual(ul + [5], pl, 'cmp')
        self.assertLess(ul, pl + [2], 'cmp')
        self.assertLessEqual(ul, pl + [2], 'cmp')
        # Also works with a custom IndexError
        ul_longer = ul + [2]
        ul_longer._IndexError = TypeError
        ul._IndexError = TypeError
        self.assertNotEqual(ul_longer, pl)
        self.assertGreater(ul_longer, ul)
        pl[1] = 20
        self.assertGreater(pl, ul, 'cmp for gt self')
        self.assertLess(ul, pl, 'cmp for self lt')
        pl[1] = -20
        self.assertLess(pl, ul, 'cmp for lt self')
        self.assertGreater(ul, pl, 'cmp for gt self')
class ListMixinTestSingle(ListMixinTest):
    """Re-run the full ListMixinTest suite against UserListB, which supports
    single-item assignment via _set_single."""
    listType = UserListB
 | 
	bsd-3-clause | 1,729,381,976,463,640,800 | 34.180095 | 93 | 0.488819 | false | 
| 
	theheros/kbengine | 
	kbe/src/lib/python/Lib/test/test_raise.py | 
	54 | 
	10232 | 
	# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Tests for the raise statement."""
from test import support
import sys
import types
import unittest
def get_tb():
    """Return a traceback object obtained by raising and catching an OSError."""
    try:
        raise OSError()
    except OSError:
        # Catch only the exception we raised, instead of a bare `except`.
        return sys.exc_info()[2]
class Context:
    """Context manager that swallows any exception raised inside its body."""
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        # Returning True tells Python to suppress the in-flight exception.
        return True
class TestRaise(unittest.TestCase):
    """Tests for the bare ``raise`` statement and re-raising semantics."""
    def test_invalid_reraise(self):
        # A bare raise with no active exception must fail with RuntimeError.
        try:
            raise
        except RuntimeError as e:
            self.assertIn("No active exception", str(e))
        else:
            self.fail("No exception raised")
    def test_reraise(self):
        # A bare raise re-raises the *same* exception object, not a copy.
        try:
            try:
                raise IndexError()
            except IndexError as e:
                exc1 = e
                raise
        except IndexError as exc2:
            self.assertTrue(exc1 is exc2)
        else:
            self.fail("No exception raised")
    def test_except_reraise(self):
        # raise in the outer handler still targets the original TypeError,
        # even after an intervening KeyError was raised and handled.
        def reraise():
            try:
                raise TypeError("foo")
            except:
                try:
                    raise KeyError("caught")
                except KeyError:
                    pass
                raise
        self.assertRaises(TypeError, reraise)
    def test_finally_reraise(self):
        # In a finally block the "current" exception is the KeyError from the
        # inner try, so that is what the bare raise re-raises.
        def reraise():
            try:
                raise TypeError("foo")
            except:
                try:
                    raise KeyError("caught")
                finally:
                    raise
        self.assertRaises(KeyError, reraise)
    def test_nested_reraise(self):
        # A bare raise works even from a function called inside the handler.
        def nested_reraise():
            raise
        def reraise():
            try:
                raise TypeError("foo")
            except:
                nested_reraise()
        self.assertRaises(TypeError, reraise)
    def test_with_reraise1(self):
        # A with block between except and raise must not disturb re-raising.
        def reraise():
            try:
                raise TypeError("foo")
            except:
                with Context():
                    pass
                raise
        self.assertRaises(TypeError, reraise)
    def test_with_reraise2(self):
        # Context suppresses the KeyError, so the bare raise sees TypeError.
        def reraise():
            try:
                raise TypeError("foo")
            except:
                with Context():
                    raise KeyError("caught")
                raise
        self.assertRaises(TypeError, reraise)
    def test_yield_reraise(self):
        # A generator resumed inside an except block can re-raise it; after
        # that the generator is exhausted (StopIteration on the next call).
        def reraise():
            try:
                raise TypeError("foo")
            except:
                yield 1
                raise
        g = reraise()
        next(g)
        self.assertRaises(TypeError, lambda: next(g))
        self.assertRaises(StopIteration, lambda: next(g))
    def test_erroneous_exception(self):
        # If instantiating the exception class itself raises, that new
        # exception (RuntimeError) propagates instead of MyException.
        class MyException(Exception):
            def __init__(self):
                raise RuntimeError()
        try:
            raise MyException
        except RuntimeError:
            pass
        else:
            self.fail("No exception raised")
    def test_new_returns_invalid_instance(self):
        # See issue #11627.
        class MyException(Exception):
            def __new__(cls, *args):
                return object()
        with self.assertRaises(TypeError):
            raise MyException
class TestCause(unittest.TestCase):
    """Tests for explicit exception chaining: ``raise X from Y``."""
    def test_invalid_cause(self):
        # Only exceptions (or None) are accepted after 'from'.
        try:
            raise IndexError from 5
        except TypeError as e:
            self.assertIn("exception cause", str(e))
        else:
            self.fail("No exception raised")
    def test_class_cause(self):
        # A class given as the cause is instantiated implicitly.
        try:
            raise IndexError from KeyError
        except IndexError as e:
            self.assertIsInstance(e.__cause__, KeyError)
        else:
            self.fail("No exception raised")
    def test_instance_cause(self):
        # An instance cause is attached unchanged (same object).
        cause = KeyError()
        try:
            raise IndexError from cause
        except IndexError as e:
            self.assertTrue(e.__cause__ is cause)
        else:
            self.fail("No exception raised")
    def test_erroneous_cause(self):
        # If instantiating the cause class raises, that error propagates.
        class MyException(Exception):
            def __init__(self):
                raise RuntimeError()
        try:
            raise IndexError from MyException
        except RuntimeError:
            pass
        else:
            self.fail("No exception raised")
class TestTraceback(unittest.TestCase):
    """Tests for the __traceback__ attribute and with_traceback()."""
    def test_sets_traceback(self):
        # Raising automatically attaches a traceback object.
        try:
            raise IndexError()
        except IndexError as e:
            self.assertIsInstance(e.__traceback__, types.TracebackType)
        else:
            self.fail("No exception raised")
    def test_accepts_traceback(self):
        # with_traceback() chains the supplied tb behind the new frame's tb.
        tb = get_tb()
        try:
            raise IndexError().with_traceback(tb)
        except IndexError as e:
            self.assertNotEqual(e.__traceback__, tb)
            self.assertEqual(e.__traceback__.tb_next, tb)
        else:
            self.fail("No exception raised")
class TestContext(unittest.TestCase):
    """Tests for implicit exception chaining via ``__context__``."""
    def test_instance_context_instance_raise(self):
        # An instance raised inside a handler is chained as-is.
        context = IndexError()
        try:
            try:
                raise context
            except:
                raise OSError()
        except OSError as e:
            self.assertEqual(e.__context__, context)
        else:
            self.fail("No exception raised")
    def test_class_context_instance_raise(self):
        # Raising a class implicitly instantiates it, so __context__ is an
        # instance of the class, not the class object itself.
        context = IndexError
        try:
            try:
                raise context
            except:
                raise OSError()
        except OSError as e:
            self.assertNotEqual(e.__context__, context)
            self.assertIsInstance(e.__context__, context)
        else:
            self.fail("No exception raised")
    def test_class_context_class_raise(self):
        # Same as above, but the second raise also uses a bare class.
        context = IndexError
        try:
            try:
                raise context
            except:
                raise OSError
        except OSError as e:
            self.assertNotEqual(e.__context__, context)
            self.assertIsInstance(e.__context__, context)
        else:
            self.fail("No exception raised")
    def test_c_exception_context(self):
        # Exceptions raised from C code (1/0) participate in chaining too.
        try:
            try:
                1/0
            except:
                raise OSError
        except OSError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")
    def test_c_exception_raise(self):
        # A NameError triggered inside the handler chains onto the original.
        try:
            try:
                1/0
            except:
                xyzzy
        except NameError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")
    def test_noraise_finally(self):
        # No exception is in flight when finally runs, so no context is set.
        try:
            try:
                pass
            finally:
                raise OSError
        except OSError as e:
            self.assertTrue(e.__context__ is None)
        else:
            self.fail("No exception raised")
    def test_raise_finally(self):
        # An exception raised in finally chains onto the in-flight one.
        try:
            try:
                1/0
            finally:
                raise OSError
        except OSError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")
    def test_context_manager(self):
        # An exception raised in __exit__ chains onto the body's exception.
        class ContextManager:
            def __enter__(self):
                pass
            def __exit__(self, t, v, tb):
                xyzzy
        try:
            with ContextManager():
                1/0
        except NameError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")
    def test_cycle_broken(self):
        # Self-cycles (when re-raising a caught exception) are broken
        try:
            try:
                1/0
            except ZeroDivisionError as e:
                raise e
        except ZeroDivisionError as e:
            self.assertTrue(e.__context__ is None, e.__context__)
    def test_reraise_cycle_broken(self):
        # Non-trivial context cycles (through re-raising a previous exception)
        # are broken too.
        try:
            try:
                xyzzy
            except NameError as a:
                try:
                    1/0
                except ZeroDivisionError:
                    raise a
        except NameError as e:
            self.assertTrue(e.__context__.__context__ is None)
    def test_3118(self):
        # deleting the generator caused the __context__ to be cleared
        def gen():
            try:
                yield 1
            finally:
                pass
        def f():
            g = gen()
            next(g)
            try:
                try:
                    raise ValueError
                except:
                    del g
                    raise KeyError
            except Exception as e:
                self.assertIsInstance(e.__context__, ValueError)
        f()
    def test_3611(self):
        # A re-raised exception in a __del__ caused the __context__
        # to be cleared
        class C:
            def __del__(self):
                try:
                    1/0
                except:
                    raise
        def f():
            x = C()
            try:
                try:
                    x.x
                except AttributeError:
                    del x
                    raise TypeError
            except Exception as e:
                self.assertNotEqual(e.__context__, None)
                self.assertIsInstance(e.__context__, AttributeError)
        # __del__ errors print to stderr; keep the test output clean.
        with support.captured_output("stderr"):
            f()
class TestRemovedFunctionality(unittest.TestCase):
    """Raise forms removed in Python 3 must now fail with TypeError."""
    def test_tuples(self):
        # The old implicit tuple-unpacking raise form is no longer allowed.
        with self.assertRaises(TypeError):
            raise (IndexError, KeyError) # This should be a tuple!
    def test_strings(self):
        # String exceptions were removed; only BaseException instances work.
        with self.assertRaises(TypeError):
            raise "foo"
def test_main():
    """Entry point used by CPython's regression-test driver (regrtest)."""
    support.run_unittest(__name__)
if __name__ == "__main__":
    unittest.main()
 | 
	lgpl-3.0 | -2,281,057,731,552,523,800 | 25.715405 | 78 | 0.495993 | false | 
| 
	embeddedarm/android_external_chromium_org | 
	build/android/tombstones.py | 
	28 | 
	5953 | 
	#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Find the most recent tombstone file(s) on all connected devices
# and prints their stacks.
#
# Assumes tombstone file was created with current symbols.
import datetime
import logging
import multiprocessing
import os
import subprocess
import sys
import optparse
from pylib import android_commands
def _ListTombstones(adb):
  """List the tombstone files on the device.
  Args:
    adb: An instance of AndroidCommands.
  Yields:
    Tuples of (tombstone filename, date time of file on device).
  """
  lines = adb.RunShellCommand('TZ=UTC su -c ls -a -l /data/tombstones')
  for line in lines:
    if 'tombstone' in line and not 'No such file or directory' in line:
      details = line.split()
      t = datetime.datetime.strptime(details[-3] + ' ' + details[-2],
                                     '%Y-%m-%d %H:%M')
      yield details[-1], t
def _GetDeviceDateTime(adb):
  """Determine the date time on the device.
  Args:
    adb: An instance of AndroidCommands.
  Returns:
    A datetime instance.
  """
  device_now_string = adb.RunShellCommand('TZ=UTC date')
  return datetime.datetime.strptime(
      device_now_string[0], '%a %b %d %H:%M:%S %Z %Y')
def _GetTombstoneData(adb, tombstone_file):
  """Retrieve the tombstone data from the device
  Args:
    tombstone_file: the tombstone to retrieve
  Returns:
    A list of lines
  """
  return adb.GetProtectedFileContents('/data/tombstones/' + tombstone_file)
def _EraseTombstone(adb, tombstone_file):
  """Deletes a tombstone from the device.
  Args:
    tombstone_file: the tombstone to delete.
  """
  return adb.RunShellCommandWithSU('rm /data/tombstones/' + tombstone_file)
def _ResolveSymbols(tombstone_data, include_stack):
  """Run the stack tool for given tombstone input.
  Args:
    tombstone_data: a list of strings of tombstone data.
    include_stack: boolean whether to include stack data in output.
  Yields:
    A string for each line of resolved stack output.
  """
  # Path to Android's 'stack' symbolizer, relative to this script.
  stack_tool = os.path.join(os.path.dirname(__file__), '..', '..',
                            'third_party', 'android_platform', 'development',
                            'scripts', 'stack')
  # Feed the whole tombstone to the symbolizer over stdin and collect stdout.
  proc = subprocess.Popen(stack_tool, stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE)
  output = proc.communicate(input='\n'.join(tombstone_data))[0]
  for line in output.split('\n'):
    # 'Stack Data:' marks the start of raw stack memory in the tool's
    # output; stop there unless the caller asked for it.
    if not include_stack and 'Stack Data:' in line:
      break
    yield line
def _ResolveTombstone(tombstone):
  """Symbolize one tombstone dict; returns its header plus resolved lines."""
  lines = []
  lines += [tombstone['file'] + ' created on ' + str(tombstone['time']) +
            ', about this long ago: ' +
            (str(tombstone['device_now'] - tombstone['time']) +
            ' Device: ' + tombstone['serial'])]
  # Python 2 print statements: show progress before the slow symbolization.
  print '\n'.join(lines)
  print 'Resolving...'
  lines += _ResolveSymbols(tombstone['data'], tombstone['stack'])
  return lines
def _ResolveTombstones(jobs, tombstones):
  """Resolve a list of tombstones.
  Args:
    jobs: the number of jobs to use with multiprocess.
    tombstones: a list of tombstones.
  """
  if not tombstones:
    print 'No device attached?  Or no tombstones?'
    return
  if len(tombstones) == 1:
    # Skip the multiprocessing overhead for the single-tombstone case.
    data = _ResolveTombstone(tombstones[0])
  else:
    pool = multiprocessing.Pool(processes=jobs)
    data = pool.map(_ResolveTombstone, tombstones)
    # pool.map returns a list of line-lists; flatten each to one string.
    data = ['\n'.join(d) for d in data]
  print '\n'.join(data)
def _GetTombstonesForDevice(adb, options):
  """Returns a list of tombstones on a given adb connection.

  Args:
    adb: An instance of Androidcommands.
    options: command line arguments from OptParse

  Returns:
    A list of dicts describing the selected tombstones (possibly empty).
  """
  ret = []
  all_tombstones = list(_ListTombstones(adb))
  if not all_tombstones:
    # print() with a single argument behaves identically on Python 2 and 3.
    print('No device attached?  Or no tombstones?')
    return ret
  # Sort the tombstones in date order, descending.  key= (instead of the
  # Python-2-only cmp=) produces the same ordering and also works on Python 3.
  all_tombstones.sort(key=lambda tombstone: tombstone[1], reverse=True)
  # Only resolve the most recent unless --all-tombstones given.
  tombstones = all_tombstones if options.all_tombstones else [all_tombstones[0]]
  device_now = _GetDeviceDateTime(adb)
  for tombstone_file, tombstone_time in tombstones:
    ret += [{'serial': adb.Adb().GetSerialNumber(),
             'device_now': device_now,
             'time': tombstone_time,
             'file': tombstone_file,
             'stack': options.stack,
             'data': _GetTombstoneData(adb, tombstone_file)}]
  # Erase all the tombstones if desired.
  if options.wipe_tombstones:
    for tombstone_file, _ in all_tombstones:
      _EraseTombstone(adb, tombstone_file)
  return ret
def main():
  """Command-line entry point: fetch and symbolize device tombstones."""
  parser = optparse.OptionParser()
  parser.add_option('--device',
                    help='The serial number of the device. If not specified '
                         'will use all devices.')
  parser.add_option('-a', '--all-tombstones', action='store_true',
                    help="""Resolve symbols for all tombstones, rather than just
                         the most recent""")
  parser.add_option('-s', '--stack', action='store_true',
                    help='Also include symbols for stack data')
  parser.add_option('-w', '--wipe-tombstones', action='store_true',
                    help='Erase all tombstones from device after processing')
  parser.add_option('-j', '--jobs', type='int',
                    default=4,
                    help='Number of jobs to use when processing multiple '
                         'crash stacks.')
  options, args = parser.parse_args()
  # Without --device, process every attached device.
  if options.device:
    devices = [options.device]
  else:
    devices = android_commands.GetAttachedDevices()
  tombstones = []
  for device in devices:
    adb = android_commands.AndroidCommands(device)
    tombstones += _GetTombstonesForDevice(adb, options)
  _ResolveTombstones(options.jobs, tombstones)
if __name__ == '__main__':
  sys.exit(main())
 | 
	bsd-3-clause | -4,382,173,006,879,530,000 | 29.528205 | 80 | 0.645389 | false | 
| 
	datalogics-robb/scons | 
	src/engine/SCons/Tool/tar.py | 
	2 | 
	2320 | 
	"""SCons.Tool.tar
Tool-specific initialization for tar.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
# Candidate tar executable names probed via env.Detect() below.
tars = ['tar', 'gtar']
# Action that runs $TARCOM, printing $TARCOMSTR instead when it is set.
TarAction = SCons.Action.Action('$TARCOM', '$TARCOMSTR')
# Builder producing one $TARSUFFIX archive; DirScanner lets whole directory
# trees be given as sources, and multi=1 lets several Tar() calls
# accumulate sources into the same target.
TarBuilder = SCons.Builder.Builder(action = TarAction,
                                   source_factory = SCons.Node.FS.Entry,
                                   source_scanner = SCons.Defaults.DirScanner,
                                   suffix = '$TARSUFFIX',
                                   multi = 1)
def generate(env):
    """Add Builders and construction variables for tar to an Environment."""
    # Install the Tar builder only if the environment lacks one already.
    try:
        builder = env['BUILDERS']['Tar']
    except KeyError:
        builder = TarBuilder
        env['BUILDERS']['Tar'] = builder
    # Fall back to 'gtar' when no tar program can be detected on PATH.
    env['TAR'] = env.Detect(tars) or 'gtar'
    env['TARFLAGS'] = SCons.Util.CLVar('-c')
    env['TARCOM'] = '$TAR $TARFLAGS -f $TARGET $SOURCES'
    env['TARSUFFIX'] = '.tar'
def exists(env):
    """Return a true value when one of the candidate tar tools is found."""
    return env.Detect(tars)
 | 
	mit | 3,870,170,223,994,117,000 | 33.626866 | 78 | 0.676293 | false | 
| 
	GoogleCloudPlatform/DataflowTemplates | 
	v2/common/src/test/resources/PythonTextTransformerTest/transform.py | 
	1 | 
	2207 | 
	"""
Copyright (C) 2020 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
"""
A good transform function.
@param {string} inJson
@return {string} outJson
"""
import copy
import json
import sys
import traceback
def transform(event):
  """ Return a Dict or List of Dict Objects.  Return None to discard """
  event['new_key'] = 'new_value'
  # event = event
  return event
def _handle_result(input_data):
  event_id = copy.deepcopy(input_data['id'])
  event = copy.deepcopy(input_data['event'])
  try:
    transformed_event = transform(event)
    if isinstance(transformed_event, list):
      for row in transformed_event:
        payload = json.dumps({'status': 'SUCCESS',
                              'id': event_id,
                              'event': row,
                              'error_message': None})
        print(payload)
    else:
      payload = json.dumps({'status': 'SUCCESS',
                            'id': event_id,
                            'event': transformed_event,
                            'error_message': None})
      print(payload)
  except Exception as e:
    stack_trace = traceback.format_exc()
    payload = json.dumps({'status': 'FAILED',
                          'id': event_id,
                          'event': event,
                          'error_message': stack_trace})
    print(payload)
if __name__ == '__main__':
  # TODO: How do we handle the case where there are no messages
  # Invoked with a newline-delimited JSON file of {'id':..., 'event':...}
  # records as argv[1]; results are printed to stdout, one JSON per line.
  file_name = sys.argv[1]
  data = []
  with open(file_name, "r") as data_file:
    for line in data_file:
      data.append(json.loads(line))
  if isinstance(data, list):
    for event in data:
      _handle_result(event)
  else:
    # Unreachable in practice: 'data' is always the list built above.
    event = data
    _handle_result(event)
  exit()
 | 
	apache-2.0 | -6,333,046,332,267,350,000 | 29.232877 | 77 | 0.608065 | false | 
| 
	zifeishan/deepdive | 
	examples/tutorial_example/step3-more-data/experiment-reports/v00001/code/udf/ext_has_spouse_features.py | 
	60 | 
	1304 | 
	#! /usr/bin/env python
import sys
import ddlib     # DeepDive python utility
# Delimiter the upstream extractor uses to encode an array in one TSV column.
ARR_DELIM = '~^~'
# For each input tuple
# (words, relation_id, p1_start, p1_length, p2_start, p2_length), emit one
# (relation_id, feature) output row per extracted feature.
for row in sys.stdin:
  parts = row.strip().split('\t')
  if len(parts) != 6:
    # NOTE: Python 2 print-to-stderr syntax; this script targets Python 2.
    print >>sys.stderr, 'Failed to parse row:', row
    continue
  # Get all fields from a row
  words = parts[0].split(ARR_DELIM)
  relation_id = parts[1]
  p1_start, p1_length, p2_start, p2_length = [int(x) for x in parts[2:]]
  # Unpack input into tuples.
  span1 = ddlib.Span(begin_word_id=p1_start, length=p1_length)
  span2 = ddlib.Span(begin_word_id=p2_start, length=p2_length)
  # Features for this pair come in here
  features = set()
  # Feature 1: Bag of words between the two phrases
  words_between = ddlib.tokens_between_spans(words, span1, span2)
  for word in words_between.elements:
    features.add("word_between=" + word)
  # Feature 2: Number of words between the two phrases
  features.add("num_words_between=%s" % len(words_between.elements))
  # Feature 3: Does the last word (last name) match?
  last_word_left = ddlib.materialize_span(words, span1)[-1]
  last_word_right = ddlib.materialize_span(words, span2)[-1]
  if (last_word_left == last_word_right):
    features.add("potential_last_name_match")
  # One TSV output row per feature (Python 2 print statement).
  for feature in features:
    print str(relation_id) + '\t' + feature
 | 
	apache-2.0 | -7,309,405,940,012,458,000 | 30.047619 | 72 | 0.682515 | false | 
| 
	JT5D/Alfred-Popclip-Sublime | 
	Sublime Text 2/Python PEP8 Autoformat/libs/lib2to3/fixes/fix_intern.py | 
	7 | 
	1362 | 
	# Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
    """2to3 fixer rewriting ``intern(s)`` calls into ``sys.intern(s)``."""
    # Pattern grammar: match a call to the bare name 'intern' with exactly
    # one positional argument (no keyword arguments), capturing the parens,
    # the argument, and any trailing trailers (e.g. '.foo', '[0]').
    PATTERN = """
    power< 'intern'
           trailer< lpar='('
                    ( not(arglist | argument<any '=' any>) obj=any
                      | obj=arglist<(not argument<any '=' any>) any ','> )
                    rpar=')' >
           after=any*
    >
    """
    def transform(self, node, results):
        """Build the replacement ``sys.intern(...)`` node for a match."""
        syms = self.syms
        obj = results["obj"].clone()
        if obj.type == syms.arglist:
            newarglist = obj.clone()
        else:
            # Wrap a single argument in an arglist node for uniform handling.
            newarglist = pytree.Node(syms.arglist, [obj.clone()])
        after = results["after"]
        if after:
            after = [n.clone() for n in after]
        new = pytree.Node(syms.power,
                          Attr(Name(u"sys"), Name(u"intern")) +
                          [pytree.Node(syms.trailer,
                                       [results["lpar"].clone(),
                                        newarglist,
                                        results["rpar"].clone()])] + after)
        # Preserve the original whitespace/comments preceding the call.
        new.prefix = node.prefix
        # Ensure 'import sys' exists in the module being fixed.
        touch_import(None, u'sys', node)
        return new
 | 
	gpl-2.0 | 8,113,873,073,803,947,000 | 29.954545 | 75 | 0.483113 | false | 
| 
	mrjefftang/psutil | 
	docs/conf.py | 
	16 | 
	7822 | 
	# -*- coding: utf-8 -*-
#
# psutil documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import ast
import datetime
import os
PROJECT_NAME = "psutil"
AUTHOR = "Giampaolo Rodola'"
# Current year, interpolated into the copyright string below.
THIS_YEAR = str(datetime.datetime.now().year)
# Directory containing this conf.py; the version is read relative to it.
HERE = os.path.abspath(os.path.dirname(__file__))
def get_version():
    """Extract psutil's version string from psutil/__init__.py.

    Reads the package source as text instead of importing it, so building
    the docs does not require psutil's C extension to be compiled.

    Returns:
        The "X.Y.Z" version string.

    Raises:
        ValueError: if no ``__version__`` assignment is found.
    """
    INIT = os.path.abspath(os.path.join(HERE, '../psutil/__init__.py'))
    with open(INIT, 'r') as f:
        for line in f:
            if line.startswith('__version__'):
                # literal_eval safely parses the quoted version string;
                # eval() would execute arbitrary code from the file.
                ret = ast.literal_eval(line.strip().split(' = ')[1])
                assert ret.count('.') == 2, ret
                for num in ret.split('.'):
                    assert num.isdigit(), ret
                return ret
        else:
            raise ValueError("couldn't find version string")
VERSION = get_version()
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.coverage',
              'sphinx.ext.pngmath',
              'sphinx.ext.viewcode',
              'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_template']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = PROJECT_NAME
copyright = '2009-%s, %s' % (THIS_YEAR, AUTHOR)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
autodoc_docstring_signature = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme = 'pydoctheme'
html_theme_options = {'collapsiblesidebar': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = "{project} {version} documentation".format(**locals())
# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    'index': 'indexsidebar.html',
    '**': ['globaltoc.html',
           'relations.html',
           'sourcelink.html',
           'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {
#    'index': 'indexcontent.html',
# }
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = '%s-doc' % PROJECT_NAME
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', '%s.tex' % PROJECT_NAME,
     '%s documentation' % PROJECT_NAME, AUTHOR),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', PROJECT_NAME, '%s documentation' % PROJECT_NAME, [AUTHOR], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
 | 
	bsd-3-clause | 4,158,857,220,753,709,600 | 30.540323 | 79 | 0.685758 | false | 
| 
	ychen820/microblog | 
	y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/contrib/auth/management/__init__.py | 
	126 | 
	2854 | 
	"""
Creates permissions for all installed apps that need permissions.
"""
from django.contrib.auth import models as auth_app
from django.db.models import get_models, signals
def _get_permission_codename(action, opts):
    return u'%s_%s' % (action, opts.object_name.lower())
def _get_all_permissions(opts):
    "Returns (codename, name) for all permissions in the given opts."
    perms = []
    for action in ('add', 'change', 'delete'):
        perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw)))
    return perms + list(opts.permissions)
def create_permissions(app, created_models, verbosity, **kwargs):
    """post_syncdb handler: create the built-in and custom Permission rows
    for every model of *app* that does not already have them.
    """
    from django.contrib.contenttypes.models import ContentType
    app_models = get_models(app)
    # This will hold the permissions we're looking for as
    # (content_type, (codename, name))
    searched_perms = list()
    # The codenames and ctypes that should exist.
    ctypes = set()
    for klass in app_models:
        ctype = ContentType.objects.get_for_model(klass)
        ctypes.add(ctype)
        for perm in _get_all_permissions(klass._meta):
            searched_perms.append((ctype, perm))
    # Find all the Permissions that have a context_type for a model we're
    # looking for.  We don't need to check for codenames since we already have
    # a list of the ones we're going to create.
    all_perms = set(auth_app.Permission.objects.filter(
        content_type__in=ctypes,
    ).values_list(
        "content_type", "codename"
    ))
    for ctype, (codename, name) in searched_perms:
        # If the permissions exists, move on.
        if (ctype.pk, codename) in all_perms:
            continue
        p = auth_app.Permission.objects.create(
            codename=codename,
            name=name,
            content_type=ctype
        )
        if verbosity >= 2:
            # Python 2 print statement -- this module predates print_function.
            print "Adding permission '%s'" % p
def create_superuser(app, created_models, verbosity, **kwargs):
    """post_syncdb handler: interactively offer to create a superuser the
    first time the auth application's User table is installed.
    """
    from django.core.management import call_command
    # Only prompt when the User model was just created and the caller did
    # not pass interactive=False (e.g. automated deployments/tests).
    if auth_app.User in created_models and kwargs.get('interactive', True):
        msg = ("\nYou just installed Django's auth system, which means you "
            "don't have any superusers defined.\nWould you like to create one "
            "now? (yes/no): ")
        confirm = raw_input(msg)
        # Loop until a definite yes/no answer is given (Python 2 raw_input).
        while 1:
            if confirm not in ('yes', 'no'):
                confirm = raw_input('Please enter either "yes" or "no": ')
                continue
            if confirm == 'yes':
                call_command("createsuperuser", interactive=True)
            break
# Wire both handlers to the post_syncdb signal; the dispatch_uid guarantees
# each handler is connected at most once even if this module is re-imported.
signals.post_syncdb.connect(create_permissions,
    dispatch_uid = "django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
    sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
 | 
	bsd-3-clause | 759,113,971,445,881,100 | 36.552632 | 110 | 0.644008 | false | 
| 
	gimite/personfinder | 
	app/vendors/xlrd/compdoc.py | 
	27 | 
	21226 | 
	# -*- coding: cp1252 -*-
##
# Implements the minimal functionality required
# to extract a "Workbook" or "Book" stream (as one big string)
# from an OLE2 Compound Document file.
# <p>Copyright � 2005-2012 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
# No part of the content of this file was derived from the works of David Giffin.
# 2008-11-04 SJM Avoid assertion error when -1 used instead of -2 for first_SID of empty SCSS [Frank Hoffsuemmer]
# 2007-09-08 SJM Warning message if sector sizes are extremely large.
# 2007-05-07 SJM Meaningful exception instead of IndexError if a SAT (sector allocation table) is corrupted.
# 2007-04-22 SJM Missing "<" in a struct.unpack call => can't open files on bigendian platforms.
from __future__ import print_function
import sys
from struct import unpack
from .timemachine import *
import array
##
# Magic cookie that should appear in the first 8 bytes of the file.
SIGNATURE = b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1"
# Sentinel sector IDs used in SAT/MSAT chains (negative => not a real sector):
EOCSID = -2  # end of chain
FREESID = -1  # free / unused sector
SATSID = -3  # sector holds part of the SAT itself
MSATSID = -4  # sector holds part of the MSAT
EVILSID = -5  # local marker for sectors known to be bad/out of range
class CompDocError(Exception):
    """Raised when an OLE2 compound document is malformed or corrupt."""
class DirNode(object):
    # One 128-byte entry of the compound document's directory stream.
    def __init__(self, DID, dent, DEBUG=0, logfile=sys.stdout):
        # dent is the 128-byte directory entry
        self.DID = DID
        self.logfile = logfile
        # Fixed-layout fields at offset 64: name-buffer size (bytes incl.
        # terminator), entry type, red/black colour, and the DIDs of the
        # left/right siblings and of this storage's member-tree root.
        (cbufsize, self.etype, self.colour, self.left_DID, self.right_DID,
        self.root_DID) = \
            unpack('<HBBiii', dent[64:80])
        (self.first_SID, self.tot_size) = \
            unpack('<ii', dent[116:124])
        if cbufsize == 0:
            self.name = UNICODE_LITERAL('')
        else:
            self.name = unicode(dent[0:cbufsize-2], 'utf_16_le') # omit the trailing U+0000
        self.children = [] # filled in later
        self.parent = -1 # indicates orphan; fixed up later
        # Raw timestamp dwords: (create_lo, create_hi, modify_lo, modify_hi).
        self.tsinfo = unpack('<IIII', dent[100:116])
        if DEBUG:
            self.dump(DEBUG)
    def dump(self, DEBUG=1):
        # Write a one-line summary of this entry to self.logfile;
        # DEBUG == 2 additionally prints the raw timestamp words.
        fprintf(
            self.logfile,
            "DID=%d name=%r etype=%d DIDs(left=%d right=%d root=%d parent=%d kids=%r) first_SID=%d tot_size=%d\n",
            self.DID, self.name, self.etype, self.left_DID,
            self.right_DID, self.root_DID, self.parent, self.children, self.first_SID, self.tot_size
            )
        if DEBUG == 2:
            # cre_lo, cre_hi, mod_lo, mod_hi = tsinfo
            print("timestamp info", self.tsinfo, file=self.logfile)
def _build_family_tree(dirlist, parent_DID, child_DID):
    if child_DID < 0: return
    _build_family_tree(dirlist, parent_DID, dirlist[child_DID].left_DID)
    dirlist[parent_DID].children.append(child_DID)
    dirlist[child_DID].parent = parent_DID
    _build_family_tree(dirlist, parent_DID, dirlist[child_DID].right_DID)
    if dirlist[child_DID].etype == 1: # storage
        _build_family_tree(dirlist, child_DID, dirlist[child_DID].root_DID)
##
# Compound document handler.
# @param mem The raw contents of the file, as a string, or as an mmap.mmap() object. The
# only operation it needs to support is slicing.
class CompDoc(object):
    """Compound document handler.

    *mem* is the raw contents of the file (bytes or an mmap.mmap object);
    the only operation it needs to support is slicing. Parsing of the
    header, MSAT, SAT, directory, SSCS and SSAT all happens in __init__.
    """
    def __init__(self, mem, logfile=sys.stdout, DEBUG=0):
        self.logfile = logfile
        self.DEBUG = DEBUG
        if mem[0:8] != SIGNATURE:
            raise CompDocError('Not an OLE2 compound document')
        if mem[28:30] != b'\xFE\xFF':
            raise CompDocError('Expected "little-endian" marker, found %r' % mem[28:30])
        revision, version = unpack('<HH', mem[24:28])
        if DEBUG:
            print("\nCompDoc format: version=0x%04x revision=0x%04x" % (version, revision), file=logfile)
        self.mem = mem
        # ssz/sssz are log2 of the (short) sector sizes; sanity-cap absurd
        # values and fall back to the conventional 512/64 bytes.
        ssz, sssz = unpack('<HH', mem[30:34])
        if ssz > 20: # allows for 2**20 bytes i.e. 1MB
            print("WARNING: sector size (2**%d) is preposterous; assuming 512 and continuing ..." \
                % ssz, file=logfile)
            ssz = 9
        if sssz > ssz:
            print("WARNING: short stream sector size (2**%d) is preposterous; assuming 64 and continuing ..." \
                % sssz, file=logfile)
            sssz = 6
        self.sec_size = sec_size = 1 << ssz
        self.short_sec_size = 1 << sssz
        if self.sec_size != 512 or self.short_sec_size != 64:
            print("@@@@ sec_size=%d short_sec_size=%d" % (self.sec_size, self.short_sec_size), file=logfile)
        (
            SAT_tot_secs, self.dir_first_sec_sid, _unused, self.min_size_std_stream,
            SSAT_first_sec_sid, SSAT_tot_secs,
            MSATX_first_sec_sid, MSATX_tot_secs,
        # ) = unpack('<ii4xiiiii', mem[44:76])
        ) = unpack('<iiiiiiii', mem[44:76])
        mem_data_len = len(mem) - 512
        mem_data_secs, left_over = divmod(mem_data_len, sec_size)
        if left_over:
            #### raise CompDocError("Not a whole number of sectors")
            mem_data_secs += 1
            print("WARNING *** file size (%d) not 512 + multiple of sector size (%d)" \
                % (len(mem), sec_size), file=logfile)
        self.mem_data_secs = mem_data_secs # use for checking later
        self.mem_data_len = mem_data_len
        # seen[sid] records which structure claimed each sector; any sector
        # claimed twice indicates corruption and raises CompDocError below.
        seen = self.seen = array.array('B', [0]) * mem_data_secs
        if DEBUG:
            print('sec sizes', ssz, sssz, sec_size, self.short_sec_size, file=logfile)
            print("mem data: %d bytes == %d sectors" % (mem_data_len, mem_data_secs), file=logfile)
            print("SAT_tot_secs=%d, dir_first_sec_sid=%d, min_size_std_stream=%d" \
                % (SAT_tot_secs, self.dir_first_sec_sid, self.min_size_std_stream,), file=logfile)
            print("SSAT_first_sec_sid=%d, SSAT_tot_secs=%d" % (SSAT_first_sec_sid, SSAT_tot_secs,), file=logfile)
            print("MSATX_first_sec_sid=%d, MSATX_tot_secs=%d" % (MSATX_first_sec_sid, MSATX_tot_secs,), file=logfile)
        nent = sec_size // 4 # number of SID entries in a sector
        fmt = "<%di" % nent
        trunc_warned = 0
        #
        # === build the MSAT ===
        #
        # The first 109 MSAT entries live in the header; any extension
        # sectors are chained via their last entry.
        MSAT = list(unpack('<109i', mem[76:512]))
        SAT_sectors_reqd = (mem_data_secs + nent - 1) // nent
        expected_MSATX_sectors = max(0, (SAT_sectors_reqd - 109 + nent - 2) // (nent - 1))
        actual_MSATX_sectors = 0
        if MSATX_tot_secs == 0 and MSATX_first_sec_sid in (EOCSID, FREESID, 0):
            # Strictly, if there is no MSAT extension, then MSATX_first_sec_sid
            # should be set to EOCSID ... FREESID and 0 have been met in the wild.
            pass # Presuming no extension
        else:
            sid = MSATX_first_sec_sid
            while sid not in (EOCSID, FREESID, MSATSID):
                # Above should be only EOCSID according to MS & OOo docs
                # but Excel doesn't complain about FREESID. Zero is a valid
                # sector number, not a sentinel.
                if DEBUG > 1:
                    print('MSATX: sid=%d (0x%08X)' % (sid, sid), file=logfile)
                if sid >= mem_data_secs:
                    msg = "MSAT extension: accessing sector %d but only %d in file" % (sid, mem_data_secs)
                    if DEBUG > 1:
                        print(msg, file=logfile)
                        break
                    raise CompDocError(msg)
                elif sid < 0:
                    raise CompDocError("MSAT extension: invalid sector id: %d" % sid)
                if seen[sid]:
                    raise CompDocError("MSAT corruption: seen[%d] == %d" % (sid, seen[sid]))
                seen[sid] = 1
                actual_MSATX_sectors += 1
                if DEBUG and actual_MSATX_sectors > expected_MSATX_sectors:
                    print("[1]===>>>", mem_data_secs, nent, SAT_sectors_reqd, expected_MSATX_sectors, actual_MSATX_sectors, file=logfile)
                offset = 512 + sec_size * sid
                MSAT.extend(unpack(fmt, mem[offset:offset+sec_size]))
                sid = MSAT.pop() # last sector id is sid of next sector in the chain
        if DEBUG and actual_MSATX_sectors != expected_MSATX_sectors:
            print("[2]===>>>", mem_data_secs, nent, SAT_sectors_reqd, expected_MSATX_sectors, actual_MSATX_sectors, file=logfile)
        if DEBUG:
            print("MSAT: len =", len(MSAT), file=logfile)
            dump_list(MSAT, 10, logfile)
        #
        # === build the SAT ===
        #
        self.SAT = []
        actual_SAT_sectors = 0
        dump_again = 0
        for msidx in xrange(len(MSAT)):
            msid = MSAT[msidx]
            if msid in (FREESID, EOCSID):
                # Specification: the MSAT array may be padded with trailing FREESID entries.
                # Toleration: a FREESID or EOCSID entry anywhere in the MSAT array will be ignored.
                continue
            if msid >= mem_data_secs:
                if not trunc_warned:
                    print("WARNING *** File is truncated, or OLE2 MSAT is corrupt!!", file=logfile)
                    print("INFO: Trying to access sector %d but only %d available" \
                        % (msid, mem_data_secs), file=logfile)
                    trunc_warned = 1
                MSAT[msidx] = EVILSID
                dump_again = 1
                continue
            elif msid < -2:
                raise CompDocError("MSAT: invalid sector id: %d" % msid)
            if seen[msid]:
                raise CompDocError("MSAT extension corruption: seen[%d] == %d" % (msid, seen[msid]))
            seen[msid] = 2
            actual_SAT_sectors += 1
            if DEBUG and actual_SAT_sectors > SAT_sectors_reqd:
                print("[3]===>>>", mem_data_secs, nent, SAT_sectors_reqd, expected_MSATX_sectors, actual_MSATX_sectors, actual_SAT_sectors, msid, file=logfile)
            offset = 512 + sec_size * msid
            self.SAT.extend(unpack(fmt, mem[offset:offset+sec_size]))
        if DEBUG:
            print("SAT: len =", len(self.SAT), file=logfile)
            dump_list(self.SAT, 10, logfile)
            # print >> logfile, "SAT ",
            # for i, s in enumerate(self.SAT):
                # print >> logfile, "entry: %4d offset: %6d, next entry: %4d" % (i, 512 + sec_size * i, s)
                # print >> logfile, "%d:%d " % (i, s),
            print(file=logfile)
        if DEBUG and dump_again:
            print("MSAT: len =", len(MSAT), file=logfile)
            dump_list(MSAT, 10, logfile)
            for satx in xrange(mem_data_secs, len(self.SAT)):
                self.SAT[satx] = EVILSID
            print("SAT: len =", len(self.SAT), file=logfile)
            dump_list(self.SAT, 10, logfile)
        #
        # === build the directory ===
        #
        dbytes = self._get_stream(
            self.mem, 512, self.SAT, self.sec_size, self.dir_first_sec_sid,
            name="directory", seen_id=3)
        dirlist = []
        did = -1
        # Each directory entry occupies exactly 128 bytes.
        for pos in xrange(0, len(dbytes), 128):
            did += 1
            dirlist.append(DirNode(did, dbytes[pos:pos+128], 0, logfile))
        self.dirlist = dirlist
        _build_family_tree(dirlist, 0, dirlist[0].root_DID) # and stand well back ...
        if DEBUG:
            for d in dirlist:
                d.dump(DEBUG)
        #
        # === get the SSCS ===
        #
        sscs_dir = self.dirlist[0]
        assert sscs_dir.etype == 5 # root entry
        if sscs_dir.first_SID < 0 or sscs_dir.tot_size == 0:
            # Problem reported by Frank Hoffsuemmer: some software was
            # writing -1 instead of -2 (EOCSID) for the first_SID
            # when the SCCS was empty. Not having EOCSID caused assertion
            # failure in _get_stream.
            # Solution: avoid calling _get_stream in any case when the
            # SCSS appears to be empty.
            self.SSCS = ""
        else:
            self.SSCS = self._get_stream(
                self.mem, 512, self.SAT, sec_size, sscs_dir.first_SID,
                sscs_dir.tot_size, name="SSCS", seen_id=4)
        # if DEBUG: print >> logfile, "SSCS", repr(self.SSCS)
        #
        # === build the SSAT ===
        #
        self.SSAT = []
        if SSAT_tot_secs > 0 and sscs_dir.tot_size == 0:
            print("WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero", file=logfile)
        if sscs_dir.tot_size > 0:
            sid = SSAT_first_sec_sid
            nsecs = SSAT_tot_secs
            while sid >= 0 and nsecs > 0:
                if seen[sid]:
                    raise CompDocError("SSAT corruption: seen[%d] == %d" % (sid, seen[sid]))
                seen[sid] = 5
                nsecs -= 1
                start_pos = 512 + sid * sec_size
                news = list(unpack(fmt, mem[start_pos:start_pos+sec_size]))
                self.SSAT.extend(news)
                sid = self.SAT[sid]
            if DEBUG: print("SSAT last sid %d; remaining sectors %d" % (sid, nsecs), file=logfile)
            assert nsecs == 0 and sid == EOCSID
        if DEBUG:
            print("SSAT", file=logfile)
            dump_list(self.SSAT, 10, logfile)
        if DEBUG:
            print("seen", file=logfile)
            dump_list(seen, 20, logfile)
    def _get_stream(self, mem, base, sat, sec_size, start_sid, size=None, name='', seen_id=None):
        # Follow the sector chain starting at start_sid through *sat* and
        # return the concatenated stream bytes. If *size* is given it caps
        # how many bytes are taken from the final sector; if seen_id is not
        # None, sectors are claimed in self.seen to detect cross-linking.
        # print >> self.logfile, "_get_stream", base, sec_size, start_sid, size
        sectors = []
        s = start_sid
        if size is None:
            # nothing to check against
            while s >= 0:
                if seen_id is not None:
                    if self.seen[s]:
                        raise CompDocError("%s corruption: seen[%d] == %d" % (name, s, self.seen[s]))
                    self.seen[s] = seen_id
                start_pos = base + s * sec_size
                sectors.append(mem[start_pos:start_pos+sec_size])
                try:
                    s = sat[s]
                except IndexError:
                    raise CompDocError(
                        "OLE2 stream %r: sector allocation table invalid entry (%d)" %
                        (name, s)
                        )
            assert s == EOCSID
        else:
            todo = size
            while s >= 0:
                if seen_id is not None:
                    if self.seen[s]:
                        raise CompDocError("%s corruption: seen[%d] == %d" % (name, s, self.seen[s]))
                    self.seen[s] = seen_id
                start_pos = base + s * sec_size
                grab = sec_size
                if grab > todo:
                    grab = todo
                todo -= grab
                sectors.append(mem[start_pos:start_pos+grab])
                try:
                    s = sat[s]
                except IndexError:
                    raise CompDocError(
                        "OLE2 stream %r: sector allocation table invalid entry (%d)" %
                        (name, s)
                        )
            assert s == EOCSID
            if todo != 0:
                fprintf(self.logfile,
                    "WARNING *** OLE2 stream %r: expected size %d, actual size %d\n",
                    name, size, size - todo)
        return b''.join(sectors)
    def _dir_search(self, path, storage_DID=0):
        # Return matching DirNode instance, or None
        # *path* is a list of name components; recurse through storages
        # (etype 1) until the final component names a user stream (etype 2).
        head = path[0]
        tail = path[1:]
        dl = self.dirlist
        for child in dl[storage_DID].children:
            if dl[child].name.lower() == head.lower():
                et = dl[child].etype
                if et == 2:
                    return dl[child]
                if et == 1:
                    if not tail:
                        raise CompDocError("Requested component is a 'storage'")
                    return self._dir_search(tail, child)
                dl[child].dump(1)
                raise CompDocError("Requested stream is not a 'user stream'")
        return None
    ##
    # Interrogate the compound document's directory; return the stream as a string if found, otherwise
    # return None.
    # @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto.
    def get_named_stream(self, qname):
        d = self._dir_search(qname.split("/"))
        if d is None:
            return None
        # Streams below min_size_std_stream live in the SSCS and are
        # chained through the SSAT instead of the main SAT.
        if d.tot_size >= self.min_size_std_stream:
            return self._get_stream(
                self.mem, 512, self.SAT, self.sec_size, d.first_SID,
                d.tot_size, name=qname, seen_id=d.DID+6)
        else:
            return self._get_stream(
                self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID,
                d.tot_size, name=qname + " (from SSCS)", seen_id=None)
    ##
    # Interrogate the compound document's directory.
    # If the named stream is not found, (None, 0, 0) will be returned.
    # If the named stream is found and is contiguous within the original byte sequence ("mem")
    # used when the document was opened,
    # then (mem, offset_to_start_of_stream, length_of_stream) is returned.
    # Otherwise a new string is built from the fragments and (new_string, 0, length_of_stream) is returned.
    # @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto.
    def locate_named_stream(self, qname):
        d = self._dir_search(qname.split("/"))
        if d is None:
            return (None, 0, 0)
        if d.tot_size > self.mem_data_len:
            raise CompDocError("%r stream length (%d bytes) > file data size (%d bytes)"
                % (qname, d.tot_size, self.mem_data_len))
        if d.tot_size >= self.min_size_std_stream:
            result = self._locate_stream(
                self.mem, 512, self.SAT, self.sec_size, d.first_SID,
                d.tot_size, qname, d.DID+6)
            if self.DEBUG:
                print("\nseen", file=self.logfile)
                dump_list(self.seen, 20, self.logfile)
            return result
        else:
            # Short streams always come back as a rebuilt string (offset 0).
            return (
                self._get_stream(
                    self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID,
                    d.tot_size, qname + " (from SSCS)", None),
                0,
                d.tot_size
                )
    def _locate_stream(self, mem, base, sat, sec_size, start_sid, expected_stream_size, qname, seen_id):
        # Walk the chain collecting (start, end) slices of contiguous runs;
        # if the whole stream is one run, return (mem, offset, size) without
        # copying, otherwise join the fragments into a new byte string.
        # print >> self.logfile, "_locate_stream", base, sec_size, start_sid, expected_stream_size
        s = start_sid
        if s < 0:
            raise CompDocError("_locate_stream: start_sid (%d) is -ve" % start_sid)
        p = -99 # dummy previous SID
        start_pos = -9999
        end_pos = -8888
        slices = []
        tot_found = 0
        found_limit = (expected_stream_size + sec_size - 1) // sec_size
        while s >= 0:
            if self.seen[s]:
                print("_locate_stream(%s): seen" % qname, file=self.logfile); dump_list(self.seen, 20, self.logfile)
                raise CompDocError("%s corruption: seen[%d] == %d" % (qname, s, self.seen[s]))
            self.seen[s] = seen_id
            tot_found += 1
            if tot_found > found_limit:
                raise CompDocError(
                    "%s: size exceeds expected %d bytes; corrupt?"
                    % (qname, found_limit * sec_size)
                    ) # Note: expected size rounded up to higher sector
            if s == p+1:
                # contiguous sectors
                end_pos += sec_size
            else:
                # start new slice
                if p >= 0:
                    # not first time
                    slices.append((start_pos, end_pos))
                start_pos = base + s * sec_size
                end_pos = start_pos + sec_size
            p = s
            s = sat[s]
        assert s == EOCSID
        assert tot_found == found_limit
        # print >> self.logfile, "_locate_stream(%s): seen" % qname; dump_list(self.seen, 20, self.logfile)
        if not slices:
            # The stream is contiguous ... just what we like!
            return (mem, start_pos, expected_stream_size)
        slices.append((start_pos, end_pos))
        # print >> self.logfile, "+++>>> %d fragments" % len(slices)
        return (b''.join([mem[start_pos:end_pos] for start_pos, end_pos in slices]), 0, expected_stream_size)
# ==========================================================================================
def x_dump_line(alist, stride, f, dpos, equal=0):
    """Write one debug row to *f*: a 5-wide position, a marker (space for a
    normal row, '=' for a repeated one), then up to *stride* values."""
    marker = " ="[equal]
    f.write("%5d%s " % (dpos, marker))
    for item in alist[dpos:dpos + stride]:
        f.write(str(item) + ' ')
    f.write('\n')
def dump_list(alist, stride, f=sys.stdout):
    """Print *alist* to *f* in rows of *stride* values, collapsing runs of
    identical rows; a '=' after the position marks the last row of a
    collapsed run."""
    def emit(dpos, equal=0):
        # " ="[equal]: space for a fresh row, '=' for a repeated one.
        f.write("%5d%s " % (dpos, " ="[equal]))
        for value in alist[dpos:dpos + stride]:
            f.write(str(value) + ' ')
        f.write('\n')
    pos = None
    last_emitted = None
    # xrange comes from the module's timemachine star-import (range on Py3).
    for pos in xrange(0, len(alist), stride):
        if last_emitted is None:
            emit(pos)
            last_emitted = pos
        elif alist[pos:pos + stride] != alist[last_emitted:last_emitted + stride]:
            if pos - last_emitted > stride:
                emit(pos - stride, equal=1)
            emit(pos)
            last_emitted = pos
    if last_emitted is not None and pos is not None and pos != last_emitted:
        emit(pos, equal=1)
 | 
	apache-2.0 | -1,583,750,774,636,250,000 | 43.871036 | 159 | 0.53317 | false | 
| 
	CallaJun/hackprince | 
	indico/numpy/distutils/tests/test_misc_util.py | 
	69 | 
	3104 | 
	#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from numpy.testing import *
from numpy.distutils.misc_util import appendpath, minrelpath, \
    gpaths, get_shared_lib_extension
from os.path import join, sep, dirname
ajoin = lambda *paths: join(*((sep,)+paths))
class TestAppendpath(TestCase):
    """Tests for numpy.distutils.misc_util.appendpath (path joining that
    merges common prefixes and normalizes absolute/relative mixes)."""
    def test_1(self):
        # Simple one-component joins, absolute and relative in each slot.
        assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
        assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name'))
        assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name'))
        assert_equal(appendpath('prefix', '/name'), join('prefix', 'name'))
    def test_2(self):
        # Multi-component second arguments.
        assert_equal(appendpath('prefix/sub', 'name'),
                     join('prefix', 'sub', 'name'))
        assert_equal(appendpath('prefix/sub', 'sup/name'),
                     join('prefix', 'sub', 'sup', 'name'))
        assert_equal(appendpath('/prefix/sub', '/prefix/name'),
                     ajoin('prefix', 'sub', 'name'))
    def test_3(self):
        # Overlapping absolute prefixes are merged, not duplicated.
        assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'),
                     ajoin('prefix', 'sub', 'sup', 'name'))
        assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'),
                     ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name'))
        assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
                     ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
class TestMinrelpath(TestCase):
    """Tests for numpy.distutils.misc_util.minrelpath ('..' resolution)."""
    def test_1(self):
        # n() converts '/'-separated test paths to the platform separator.
        n = lambda path: path.replace('/', sep)
        assert_equal(minrelpath(n('aa/bb')), n('aa/bb'))
        assert_equal(minrelpath('..'), '..')
        assert_equal(minrelpath(n('aa/..')), '')
        assert_equal(minrelpath(n('aa/../bb')), 'bb')
        assert_equal(minrelpath(n('aa/bb/..')), 'aa')
        assert_equal(minrelpath(n('aa/bb/../..')), '')
        assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd'))
        assert_equal(minrelpath(n('.././..')), n('../..'))
        assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
class TestGpaths(TestCase):
    """Tests for numpy.distutils.misc_util.gpaths (glob relative to a
    local path), checked against files in the distutils package itself."""
    def test_gpaths(self):
        local_path = minrelpath(join(dirname(__file__), '..'))
        ls = gpaths('command/*.py', local_path)
        assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls))
        f = gpaths('system_info.py', local_path)
        assert_(join(local_path, 'system_info.py')==f[0], repr(f))
class TestSharedExtension(TestCase):
    """Tests for get_shared_lib_extension on the current platform."""
    def test_get_shared_lib_extension(self):
        import sys
        ext = get_shared_lib_extension(is_python_ext=False)
        # Expected plain shared-library suffix per platform.
        if sys.platform.startswith('linux'):
            assert_equal(ext, '.so')
        elif sys.platform.startswith('gnukfreebsd'):
            assert_equal(ext, '.so')
        elif sys.platform.startswith('darwin'):
            assert_equal(ext, '.dylib')
        elif sys.platform.startswith('win'):
            assert_equal(ext, '.dll')
        # just check for no crash
        assert_(get_shared_lib_extension(is_python_ext=True))
if __name__ == "__main__":
    run_module_suite()
 | 
	lgpl-3.0 | 8,675,482,400,004,908,000 | 40.386667 | 77 | 0.569265 | false | 
| 
	loco-odoo/localizacion_co | 
	openerp/addons/email_template/wizard/__init__.py | 
	446 | 
	1130 | 
	# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2009 Sharoon Thomas
#    Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import email_template_preview
import mail_compose_message
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
 | 
	agpl-3.0 | 5,215,239,164,660,881,000 | 42.461538 | 78 | 0.625664 | false | 
| 
	Distrotech/pycairo | 
	examples/cairo_snippets/snippets_pdf.py | 
	9 | 
	1523 | 
	#!/usr/bin/env python
"""Python version of cairo-demo/cairo_snippets/cairo_snippets_pdf.c
create a file for each example rather than one large file for all examples
"""
from __future__ import division
from math import pi as M_PI  # used by many snippets
import sys
import cairo
if not cairo.HAS_PDF_SURFACE:
  raise SystemExit ('cairo was not compiled with PDF support')
from snippets import snip_list, snippet_normalize
width_in_inches, height_in_inches = 2, 2
width_in_points, height_in_points = width_in_inches * 72, height_in_inches * 72
width, height = width_in_points, height_in_points # used by snippet_normalize()
def do_snippet (snippet):
  """Render one snippet to snippets/<snippet>.pdf.

  Executes the snippet's source file with a prepared cairo Context ``cr``
  (and the module-level width/height globals) in scope. On any error the
  exception is reported on stderr and the page is not finished.
  """
  if verbose_mode:
    print('processing %s' % snippet)
  filename = 'snippets/%s.pdf' % snippet
  surface = cairo.PDFSurface (filename, width_in_points, height_in_points)
  cr = cairo.Context (surface)
  cr.save()
  try:
    fName = 'snippets/%s.py' % snippet
    # Fix: close the source file deterministically instead of leaking the
    # handle from open(...).read().
    with open(fName) as src:
      code = src.read()
    # NOTE(review): exec of the snippet files is intentional (demo driver);
    # do not point this at untrusted paths.
    exec (code, globals(), locals())
  except Exception:
    # Fix: was a bare "except:", which also swallowed KeyboardInterrupt
    # and SystemExit; report only genuine errors.
    exc_type, exc_value = sys.exc_info()[:2]
    print(exc_type, exc_value, file=sys.stderr)
  else:
    cr.restore()
    cr.show_page()
    surface.finish()
  if verbose_mode:
    # Fix: a bare ``print`` is a no-op expression under Python 3 (which this
    # file targets -- see print(..., file=...) above); call it so the
    # intended blank line is actually emitted.
    print()
if __name__ == '__main__':
  verbose_mode = True
  # -s flag: silent mode, suppress the per-snippet progress output.
  if len(sys.argv) > 1 and sys.argv[1] == '-s':
    verbose_mode = False
    del sys.argv[1]
  if len(sys.argv) > 1: # do specified snippets
    snippet_list = sys.argv[1:]
  else:                 # do all snippets
    snippet_list = snip_list
  for s in snippet_list:
    do_snippet (s)
 | 
	gpl-3.0 | 1,881,779,671,919,894,300 | 25.258621 | 79 | 0.665135 | false | 
| 
	cosmoharrigan/pylearn2 | 
	pylearn2/costs/gated_autoencoder.py | 
	39 | 
	5793 | 
	"""
Definitions of the cost for the gated-autoencoder.
"""
from pylearn2.costs.cost import Cost, DefaultDataSpecsMixin
from pylearn2.space import VectorSpace
class SymmetricCost(DefaultDataSpecsMixin, Cost):
    """
    Summary (Class representing the symmetric cost).
    Subclasses can define the type of data they will use.
    Mean reconstruction error is used for real valued data
    and cross-Entropy loss is used for binary.
    See Also
    --------
    "Gradient-based learning of higher-order image features"
    """
    @staticmethod
    def cost(x, y, rx, ry):
        """
        Symmetric reconstruction cost.
        Parameters
        ----------
        x : tensor_like
            Theano symbolic representing the first input minibatch.
            Assumed to be 2-tensors, with the first dimension
            indexing training examples and the second indexing
            data dimensions.
        y : tensor_like
            Theano symbolic representing the seconde input minibatch.
            Assumed to be 2-tensors, with the first dimension
            indexing training examples and the second indexing
            data dimensions.
        rx : tensor_like
            Reconstruction of the first minibatch by the model.
        ry: tensor_like
            Reconstruction of the second minibatch by the model.
        Returns
        -------
        Cost: theano_like expression
            Representation of the cost
        """
        # Abstract: concrete subclasses (e.g. SymmetricMSRE) supply the
        # actual expression.
        raise NotImplementedError
    def expr(self, model, data, *args, **kwargs):
        """
        Returns a theano expression for the cost function.
        Returns a symbolic expression for a cost function applied to the
        minibatch of data.
        Optionally, may return None. This represents that the cost function
        is intractable but may be optimized via the get_gradients method.
        Parameters
        ----------
        model : a pylearn2 Model instance
        data : a batch in cost.get_data_specs() form
        kwargs : dict
            Optional extra arguments. Not used by the base class.
        """
        self.get_data_specs(model)[0].validate(data)
        x, y = data
        input_space = model.get_input_space()
        # Inputs may arrive in non-vector (e.g. convolutional) spaces;
        # flatten each component to a VectorSpace so the cost operates on
        # 2-D (examples x features) tensors.
        if not isinstance(input_space.components[0], VectorSpace):
            conv = input_space.components[0]
            vec = VectorSpace(conv.get_total_dimension())
            x = conv.format_as(x, vec)
        if not isinstance(input_space.components[1], VectorSpace):
            conv = input_space.components[1]
            vec = VectorSpace(conv.get_total_dimension())
            y = conv.format_as(y, vec)
        rx, ry = model.reconstructXY((x, y))
        return self.cost(x, y, rx, ry)
class SymmetricMSRE(SymmetricCost):
    """
    Symmetric mean squared reconstruction error for real valued data.

    See Also
    --------
    "Gradient-based learning of higher-order image features"
    """
    @staticmethod
    def cost(x, y, rx, ry):
        """
        Mean squared reconstruction error over both inputs.

        Parameters
        ----------
        x : tensor_like
            Theano symbolic for the first input minibatch; a 2-tensor with
            examples on the first axis and data dimensions on the second.
        y : tensor_like
            Theano symbolic for the second input minibatch, same layout.
        rx : tensor_like
            Reconstruction of the first minibatch by the model.
        ry : tensor_like
            Reconstruction of the second minibatch by the model.

        Returns
        -------
        Cost: theano_like expression
            Representation of the cost

        Notes
        -----
        Symmetric reconstruction cost as defined by Memisevic in:
        "Gradient-based learning of higher-order image features".
        This function only works with real valued data.
        """
        # Half squared error on each side, summed per example, then
        # averaged over the batch.
        err_x = 0.5 * ((x - rx) ** 2)
        err_y = 0.5 * ((y - ry) ** 2)
        return (err_x + err_y).sum(axis=1).mean()
class NormalizedSymmetricMSRE(SymmetricCost):
    """
    Normalized symmetric cost for real valued data.

    Notes
    -----
    Value used to observe the percentage of reconstruction.
    """
    @staticmethod
    def cost(x, y, rx, ry):
        """
        Normalized mean squared reconstruction error, between 0 and 1.

        Parameters
        ----------
        x : tensor_like
            Theano symbolic for the first input minibatch; a 2-tensor with
            examples on the first axis and data dimensions on the second.
        y : tensor_like
            Theano symbolic for the second input minibatch, same layout.
        rx : tensor_like
            Reconstruction of the first minibatch by the model.
        ry : tensor_like
            Reconstruction of the second minibatch by the model.

        Returns
        -------
        Cost: theano_like expression
            Representation of the cost

        Notes
        -----
        Do not use this function to train, only to monitor the
        average percentage of reconstruction achieved when training on
        real valued data.
        """
        # Symmetric MSRE divided by the mean squared input norm, so a
        # perfect reconstruction scores 0 and predicting zeros scores 1.
        numerator = (
            ((0.5 * ((x - rx) ** 2)) + (0.5 * ((y - ry) ** 2)))
        ).sum(axis=1).mean()
        denominator = (
            (0.5 * (x.norm(2, 1) ** 2)) + (0.5 * (y.norm(2, 1) ** 2))
        ).mean()
        return numerator / denominator
 | 
	bsd-3-clause | -8,314,601,974,106,115,000 | 32.293103 | 78 | 0.599862 | false | 
| 
	jaysonsantos/servo | 
	tests/wpt/web-platform-tests/tools/wptserve/tests/functional/test_server.py | 
	299 | 
	1320 | 
	import os
import unittest
import urllib2
import json
import wptserve
from base import TestUsingServer, doc_root
class TestFileHandler(TestUsingServer):
    def test_not_handled(self):
        # Requesting a path with nothing behind it must yield a 404.
        with self.assertRaises(urllib2.HTTPError) as cm:
            self.request("/not_existing")
        self.assertEquals(404, cm.exception.code)
class TestRewriter(TestUsingServer):
    def test_rewrite(self):
        # Handler that echoes back whatever path the router saw, so we can
        # observe the rewrite.
        @wptserve.handlers.handler
        def handler(request, response):
            return request.request_path

        rewritten_path = "/test/rewritten"
        self.server.rewriter.register("GET", "/test/original",
                                      rewritten_path)
        self.server.router.register("GET", rewritten_path, handler)
        resp = self.request("/test/original")
        self.assertEquals(200, resp.getcode())
        self.assertEquals("/test/rewritten", resp.read())
class TestRequestHandler(TestUsingServer):
    def test_exception(self):
        # A handler that raises should surface to the client as a 500.
        @wptserve.handlers.handler
        def handler(request, response):
            raise Exception

        self.server.router.register("GET", "/test/raises", handler)
        with self.assertRaises(urllib2.HTTPError) as cm:
            self.request("/test/raises")
        self.assertEquals(500, cm.exception.code)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
 | 
	mpl-2.0 | 4,185,770,732,488,002,000 | 29.697674 | 72 | 0.656061 | false | 
| 
	Rudloff/youtube-dl | 
	youtube_dl/extractor/expotv.py | 
	4 | 
	2907 | 
	from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
    int_or_none,
    unified_strdate,
)
class ExpoTVIE(InfoExtractor):
    """Extractor for review videos hosted on www.expotv.com."""
    _VALID_URL = r'https?://www\.expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])'
    _TEST = {
        'url': 'http://www.expotv.com/videos/reviews/3/40/NYX-Butter-lipstick/667916',
        'md5': 'fe1d728c3a813ff78f595bc8b7a707a8',
        'info_dict': {
            'id': '667916',
            'ext': 'mp4',
            'title': 'NYX Butter Lipstick Little Susie',
            'description': 'Goes on like butter, but looks better!',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Stephanie S.',
            'upload_date': '20150520',
            'view_count': int,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The player key embedded in the page is required to query the
        # per-video stream configuration from the client API.
        player_key = self._search_regex(
            r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
        config = self._download_json(
            'http://client.expotv.com/video/config/%s/%s' % (video_id, player_key),
            video_id, 'Downloading video configuration')

        formats = []
        for source in config['sources']:
            source_url = source.get('file')
            if not source_url:
                continue
            if source.get('type') == 'm3u8':
                # HLS manifest: expand into one format per stream variant.
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls'))
                continue
            formats.append({
                'url': source_url,
                'height': int_or_none(source.get('height')),
                'format_id': source.get('label'),
                # Prefer the extension found in the media URL, falling back
                # to the declared source type.
                'ext': self._search_regex(
                    r'filename=.*\.([a-z0-9_A-Z]+)&', source_url,
                    'file extension', default=None) or source.get('type'),
            })
        self._sort_formats(formats)

        # Remaining metadata is scraped from the page / config blob.
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = config.get('image')
        view_count = int_or_none(self._search_regex(
            r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view counts'))
        uploader = self._search_regex(
            r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage, 'uploader',
            fatal=False)
        upload_date = unified_strdate(self._search_regex(
            r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage, 'upload date',
            fatal=False), day_first=False)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'view_count': view_count,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
        }
 | 
	unlicense | 6,269,134,595,314,035,000 | 36.753247 | 93 | 0.506364 | false | 
| 
	thinksabin/lazy-devops | 
	S3 bucket Maker/IdentityAccessManagement.py | 
	1 | 
	2418 | 
	__author__ = 'gambit'
import boto
from boto.iam.connection import IAMConnection
from boto.s3.key import Key
import datetime
import time
import smtplib
import os
class IdentityAccessManagement():
    """Small boto wrapper that provisions an IAM user with access to one
    S3 bucket: create user, issue keys, attach a bucket policy, create
    the bucket.

    NOTE(security): the admin credentials below are hard-coded
    placeholders; load real credentials from the environment or a config
    file instead of committing them to source control.
    """
    admin_access_key = "XXXXXXXXXXXXXXXXXXXXXXX"
    admin_secret_key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"

    def create_user(self, s3_user):
        """Create IAM user `s3_user` unless one already exists.

        Returns True when the user was created, False when it exists.
        """
        connect = IAMConnection(self.admin_access_key, self.admin_secret_key)
        user = connect.get_all_users()
        users = user['list_users_response']['list_users_result']['users']
        for user in users:
            # Exact comparison: the previous substring test ("bob" in
            # "bobby") wrongly reported similarly named users as existing.
            if s3_user == user['user_name']:
                return False
        connect.create_user(s3_user)
        return True

    def access_key(self, s3_user):
        """Create an access key pair for `s3_user`.

        Returns a (user_name, access_key_id, secret_access_key) tuple.
        """
        connect = IAMConnection(self.admin_access_key, self.admin_secret_key)
        key = connect.create_access_key(s3_user)
        access_key = key['create_access_key_response'][u'create_access_key_result'][u'access_key'][u'access_key_id']
        secret_key = key['create_access_key_response'][u'create_access_key_result'][u'access_key'][u'secret_access_key']
        return s3_user, access_key, secret_key

    def attach_policy(self, S3_User, bucket_name):
        """Attach an inline policy granting `S3_User` full access to
        `bucket_name` (plus bucket listing on the whole account)."""
        policy = '''{
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": [
                        "s3:ListAllMyBuckets"
                    ],
                    "Effect": "Allow",
                    "Resource": "arn:aws:s3:::*"
                },
                {
                    "Action": "s3:*",
                    "Effect": "Allow",
                    "Resource": [
                        "arn:aws:s3:::%s*",
                        "arn:aws:s3:::%s*/*"
                    ]
                }
            ]
         }''' % (bucket_name, bucket_name)
        print(policy)
        # Attach policy to access the s3 bucket.
        connect = IAMConnection(self.admin_access_key, self.admin_secret_key)
        connect.put_user_policy(S3_User, bucket_name, policy)

    def create_s3_bucket(self, bucket_name):
        """Create `bucket_name` unless it already exists.

        Returns True when the bucket was created, False when it exists.

        Fix: the original returned after inspecting only the FIRST
        bucket, so the existence check was wrong, and it fell off the
        loop (returning None) when the account had no buckets at all.
        """
        s3 = boto.connect_s3(self.admin_access_key, self.admin_secret_key)
        for bucket in s3.get_all_buckets():
            if bucket.name == bucket_name:
                return False
        s3.create_bucket(bucket_name)
        return True
 | 
	apache-2.0 | 1,952,461,174,675,023,600 | 31.675676 | 120 | 0.535567 | false | 
| 
	mahak/cinder | 
	cinder/tests/unit/volume/drivers/test_kioxia.py | 
	2 | 
	40143 | 
	#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
import unittest
from unittest import mock
from oslo_utils.secretutils import md5
from cinder import exception
from cinder.tests.unit import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.kioxia import entities
from cinder.volume.drivers.kioxia import kumoscale as kioxia
from cinder.volume.drivers.kioxia import rest_client
# Canonical fixture values shared by the test cases below.
VOL_BACKEND_NAME = 'kioxia_kumoscale_1'
VOL_NAME = 'volume-c2fd04e3-320e-44eb-b-2'
VOL_UUID = 'c20aba21-6ef6-446b-b374-45733b4883ba'
VOL_SIZE = 10
VOL_PROTOCOL = 'NVMeoF'
SNAP_UUID = 'c9ef9d49-0d26-44cb-b609-0b8bd2d3db77'
CONN_UUID = '34206309-3733-4cc6-a7d5-9d4dbbe377da'
CONN_HOST_NAME = 'devstack'
CONN_NQN = 'nqn.2014-08.org.nvmexpress:uuid:' \
           'beaae2de-3a97-4be1-a739-6ac4bc5bf138'
# Pre-built provisioner responses: generic success/failure, plus a volume
# that reports two replica locations on two distinct backends.
success_prov_response = entities.ProvisionerResponse(None, None, "Success",
                                                     "Success")
fail_prov_response = entities.ProvisionerResponse(None, None, "Failure",
                                                  "Failure")
prov_backend1 = entities.Backend(None, None, None, None, 'dummy-pid-1')
prov_backend2 = entities.Backend(None, None, None, None, 'dummy-pid-2')
prov_location1 = entities.Location(VOL_UUID, prov_backend1)
prov_location2 = entities.Location(VOL_UUID, prov_backend2)
prov_volume = entities.VolumeProv(VOL_UUID, None, None, None,
                                  None, None, None, None, None, None,
                                  None, True, None, [prov_location1,
                                                     prov_location2])
prov_volumes_response = entities.ProvisionerResponse([prov_volume])
no_entities_prov_response = entities.ProvisionerResponse([], None, "Success")
class KioxiaVolumeTestCase(test.TestCase):
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_info')
    @mock.patch.object(kioxia.KumoScaleBaseVolumeDriver, '_get_kumoscale')
    def setUp(self, mock_kumoscale, mock_get_info):
        """Build a KumoScale driver wired to a fully mocked provisioner."""
        mock_get_info.return_value = success_prov_response
        mock_kumoscale.return_value = \
            rest_client.KioxiaProvisioner(['1.2.3.4'], 'cert', 'token')
        super(KioxiaVolumeTestCase, self).setUp()
        # Minimal driver configuration; values mirror the driver's options.
        self.cfg = mock.Mock(spec=conf.Configuration)
        self.cfg.volume_backend_name = VOL_BACKEND_NAME
        self.cfg.url = 'dummyURL'
        self.cfg.token = 'dummy.dummy.Rf-dummy-dummy-lE'
        self.cfg.cafile = 'dummy'
        self.cfg.num_replicas = 1
        self.cfg.block_size = 512
        self.cfg.max_iops_per_gb = 1000
        self.cfg.desired_iops_per_gb = 1000
        self.cfg.max_bw_per_gb = 1000
        self.cfg.desired_bw_per_gb = 1000
        self.cfg.same_rack_allowed = False
        self.cfg.max_replica_down_time = 5
        self.cfg.span_allowed = True
        self.cfg.vol_reserved_space_percentage = 20
        self.cfg.provisioning_type = 'THIN'
        self.driver = kioxia.KumoScaleBaseVolumeDriver(configuration=self.cfg)
        self.driver.configuration.get = lambda *args, **kwargs: {}
        self.driver.num_replicas = 2
        # Baseline stats dict; individual tests compare against this.
        self.expected_stats = {
            'volume_backend_name': VOL_BACKEND_NAME,
            'vendor_name': 'KIOXIA',
            'driver_version': self.driver.VERSION,
            'storage_protocol': 'NVMeOF',
            'consistencygroup_support': False,
            'thin_provisioning_support': True,
            'multiattach': False,
            'total_capacity_gb': 1000,
            'free_capacity_gb': 600
        }
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_info')
    def test_get_kumoscale(self, mock_get_info):
        """The URL is split into management IP and port; token is kept."""
        mock_get_info.return_value = success_prov_response
        provisioner = self.driver._get_kumoscale(
            'https://1.2.3.4:8090', 'token', 'cert')
        self.assertEqual(['1.2.3.4'], provisioner.mgmt_ips)
        self.assertEqual('8090', provisioner.port)
        self.assertEqual('token', provisioner.token)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume')
    def test_volume_create_success(self, mock_create_volume):
        """Provisioner receives alias/capacity/uuid/protocol; returns None."""
        testvol = _stub_volume()
        mock_create_volume.return_value = success_prov_response
        result = self.driver.create_volume(testvol)
        # Inspect the volume entity the driver handed to the provisioner.
        (volume_entity,), _ = mock_create_volume.call_args
        self.assertEqual(volume_entity.alias, testvol['name'][:27])
        self.assertEqual(volume_entity.capacity, testvol['size'])
        self.assertEqual(volume_entity.uuid, testvol['id'])
        self.assertEqual(volume_entity.protocol, VOL_PROTOCOL)
        self.assertIsNone(result)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume')
    def test_volume_create_failure(self, mock_create_volume):
        """A failure response from the provisioner raises a backend error."""
        mock_create_volume.return_value = fail_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume, _stub_volume())
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume')
    def test_volume_create_exception(self, mock_create_volume):
        """An exception from the provisioner raises a backend error."""
        mock_create_volume.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume, _stub_volume())
    @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume')
    def test_delete_volume_success(self, mock_delete_volume):
        """Deletion passes the volume id through and returns None."""
        testvol = _stub_volume()
        mock_delete_volume.return_value = success_prov_response
        self.assertIsNone(self.driver.delete_volume(testvol))
        mock_delete_volume.assert_any_call(testvol['id'])
    @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume')
    def test_delete_volume_failure(self, mock_delete_volume):
        """A failure response during deletion raises a backend error."""
        mock_delete_volume.return_value = fail_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_volume, _stub_volume())
    @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume')
    def test_delete_volume_exception(self, mock_delete_volume):
        """An exception during deletion raises a backend error."""
        mock_delete_volume.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_volume, _stub_volume())
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection(self, mock_host_probe,
                                   mock_publish,
                                   mock_get_volumes_by_uuid,
                                   mock_get_targets,
                                   mock_get_backend_by_id):
        """Happy path: all provisioner calls succeed and the driver returns
        nvmeof connection properties with one replica's portal/target."""
        testvol = _stub_volume()
        testconn = _stub_connector()
        prov_target1 = TargetEntity('target.nqn', prov_backend1)
        prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
        backend = BackendEntity([prov_portal])
        prov_targets_response = entities.ProvisionerResponse([prov_target1])
        mock_publish.return_value = success_prov_response
        mock_host_probe.return_value = success_prov_response
        mock_get_volumes_by_uuid.return_value = prov_volumes_response
        mock_get_targets.return_value = prov_targets_response
        mock_get_backend_by_id.return_value = \
            entities.ProvisionerResponse([backend])
        result = self.driver.initialize_connection(testvol, testconn)
        # The driver must probe the host, publish the volume, then resolve
        # the volume's backend into concrete portal addresses.
        mock_host_probe.assert_any_call(testconn['nqn'],
                                        testconn['uuid'],
                                        testconn['host'],
                                        'Agent', 'cinder-driver-0.1', 30)
        mock_publish.assert_any_call(testconn['uuid'], testvol['id'])
        mock_get_volumes_by_uuid.assert_any_call(testvol['id'])
        mock_get_targets.assert_any_call(testconn['uuid'], testvol['id'])
        mock_get_backend_by_id.assert_any_call('dummy-pid-1')
        expected_replica = {'portals': [('1.2.3.4', '4420', 'TCP')],
                            'target_nqn': 'target.nqn',
                            'vol_uuid': testvol['id']}
        expected_data = {
            'vol_uuid': testvol['id'],
            'alias': testvol['name'],
            'writable': True,
            'volume_replicas': [expected_replica]
        }
        expected_result = {
            'driver_volume_type': 'nvmeof',
            'data': expected_data
        }
        self.assertDictEqual(result, expected_result)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_host_probe_failure(self, mock_host_probe,
                                                      mock_publish,
                                                      mock_get_volumes_by_uuid,
                                                      mock_get_targets,
                                                      mock_get_backend_by_id):
        """A failed host_probe aborts initialize_connection with a
        VolumeBackendAPIException even though all later calls would succeed."""
        testvol = _stub_volume()
        testconn = _stub_connector()
        prov_target = TargetEntity('target.nqn', prov_backend1)
        prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
        backend = BackendEntity([prov_portal])
        prov_targets_response = entities.ProvisionerResponse([prov_target])
        mock_publish.return_value = success_prov_response
        mock_host_probe.return_value = fail_prov_response
        mock_get_volumes_by_uuid.return_value = prov_volumes_response
        mock_get_targets.return_value = prov_targets_response
        mock_get_backend_by_id.return_value = \
            entities.ProvisionerResponse([backend])
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, testvol, testconn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_host_probe_exception(
            self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid,
            mock_get_targets, mock_get_backend_by_id):
        """An exception raised by host_probe surfaces as a backend error."""
        testvol = _stub_volume()
        testconn = _stub_connector()
        prov_target = TargetEntity('target.nqn', prov_backend1)
        prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
        backend = BackendEntity([prov_portal])
        prov_targets_response = entities.ProvisionerResponse([prov_target])
        mock_publish.return_value = success_prov_response
        mock_host_probe.side_effect = Exception()
        mock_get_volumes_by_uuid.return_value = prov_volumes_response
        mock_get_targets.return_value = prov_targets_response
        mock_get_backend_by_id.return_value = \
            entities.ProvisionerResponse([backend])
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, testvol, testconn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_publish_failure(self, mock_host_probe,
                                                   mock_publish,
                                                   mock_get_volumes_by_uuid,
                                                   mock_get_targets,
                                                   mock_get_backend_by_id):
        """A failed publish aborts initialize_connection with a backend
        error."""
        testvol = _stub_volume()
        testconn = _stub_connector()
        prov_target = TargetEntity('target.nqn', prov_backend1)
        prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
        backend = BackendEntity([prov_portal])
        prov_targets_response = entities.ProvisionerResponse([prov_target])
        mock_publish.return_value = fail_prov_response
        mock_host_probe.return_value = success_prov_response
        mock_get_volumes_by_uuid.return_value = prov_volumes_response
        mock_get_targets.return_value = prov_targets_response
        mock_get_backend_by_id.return_value = \
            entities.ProvisionerResponse([backend])
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, testvol, testconn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_publish_exception(self, mock_host_probe,
                                                     mock_publish,
                                                     mock_get_volumes_by_uuid,
                                                     mock_get_targets,
                                                     mock_get_backend_by_id):
        """An exception raised by publish surfaces as a backend error."""
        testvol = _stub_volume()
        testconn = _stub_connector()
        prov_target = TargetEntity('target.nqn', prov_backend1)
        prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
        backend = BackendEntity([prov_portal])
        prov_targets_response = entities.ProvisionerResponse([prov_target])
        mock_publish.side_effect = Exception()
        mock_host_probe.return_value = success_prov_response
        mock_get_volumes_by_uuid.return_value = prov_volumes_response
        mock_get_targets.return_value = prov_targets_response
        mock_get_backend_by_id.return_value = \
            entities.ProvisionerResponse([backend])
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, testvol, testconn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_volumes_failure(self, mock_host_probe,
                                                   mock_publish,
                                                   mock_get_volumes_by_uuid,
                                                   mock_get_targets,
                                                   mock_get_backend_by_id):
        """A failed volume lookup aborts initialize_connection with a
        backend error."""
        testvol = _stub_volume()
        testconn = _stub_connector()
        prov_target = TargetEntity('target.nqn', prov_backend1)
        prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
        backend = BackendEntity([prov_portal])
        prov_targets_response = entities.ProvisionerResponse([prov_target])
        mock_publish.return_value = success_prov_response
        mock_host_probe.return_value = success_prov_response
        mock_get_volumes_by_uuid.return_value = fail_prov_response
        mock_get_targets.return_value = prov_targets_response
        mock_get_backend_by_id.return_value = \
            entities.ProvisionerResponse([backend])
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, testvol, testconn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_no_volumes(self, mock_host_probe,
                                              mock_publish,
                                              mock_get_volumes_by_uuid,
                                              mock_get_targets,
                                              mock_get_backend_by_id):
        """A successful lookup that yields no volume entities still aborts
        initialize_connection with a backend error."""
        testvol = _stub_volume()
        testconn = _stub_connector()
        prov_target = TargetEntity('target.nqn', prov_backend1)
        prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
        backend = BackendEntity([prov_portal])
        prov_targets_response = entities.ProvisionerResponse([prov_target])
        mock_publish.return_value = success_prov_response
        mock_host_probe.return_value = success_prov_response
        mock_get_volumes_by_uuid.return_value = no_entities_prov_response
        mock_get_targets.return_value = prov_targets_response
        mock_get_backend_by_id.return_value = \
            entities.ProvisionerResponse([backend])
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, testvol, testconn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_volumes_exception(self, mock_host_probe,
                                                     mock_publish,
                                                     mock_get_volumes_by_uuid,
                                                     mock_get_targets,
                                                     mock_get_backend_by_id):
        """An exception during volume lookup surfaces as a backend error."""
        testvol = _stub_volume()
        testconn = _stub_connector()
        prov_target = TargetEntity('target.nqn', prov_backend1)
        prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
        backend = BackendEntity([prov_portal])
        prov_targets_response = entities.ProvisionerResponse([prov_target])
        mock_publish.return_value = success_prov_response
        mock_host_probe.return_value = success_prov_response
        mock_get_volumes_by_uuid.side_effect = Exception()
        mock_get_targets.return_value = prov_targets_response
        mock_get_backend_by_id.return_value = \
            entities.ProvisionerResponse([backend])
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, testvol, testconn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_targets_failure(self, mock_host_probe,
                                                   mock_publish,
                                                   mock_get_volumes_by_uuid,
                                                   mock_get_targets,
                                                   mock_get_backend_by_id):
        """A failed target lookup aborts initialize_connection with a
        backend error."""
        testvol = _stub_volume()
        testconn = _stub_connector()
        prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
        backend = BackendEntity([prov_portal])
        mock_publish.return_value = success_prov_response
        mock_host_probe.return_value = success_prov_response
        mock_get_volumes_by_uuid.return_value = prov_volumes_response
        mock_get_targets.return_value = fail_prov_response
        mock_get_backend_by_id.return_value = \
            entities.ProvisionerResponse([backend])
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, testvol, testconn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_no_targets(self, mock_host_probe,
                                              mock_publish,
                                              mock_get_volumes_by_uuid,
                                              mock_get_targets,
                                              mock_get_backend_by_id):
        """A successful lookup that yields no target entities still aborts
        initialize_connection with a backend error."""
        testvol = _stub_volume()
        testconn = _stub_connector()
        prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP')
        backend = BackendEntity([prov_portal])
        mock_publish.return_value = success_prov_response
        mock_host_probe.return_value = success_prov_response
        mock_get_volumes_by_uuid.return_value = prov_volumes_response
        mock_get_targets.return_value = no_entities_prov_response
        mock_get_backend_by_id.return_value = \
            entities.ProvisionerResponse([backend])
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, testvol, testconn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_targets_exception(self, mock_host_probe,
                                                     mock_publish,
                                                     mock_get_volumes_by_uuid,
                                                     mock_get_targets,
                                                     mock_get_backend_by_id):
        """An exception from get_targets is wrapped as a backend error."""
        vol = _stub_volume()
        conn = _stub_connector()
        portal = PortalEntity('1.2.3.4', 4420, 'TCP')
        mock_host_probe.return_value = success_prov_response
        mock_publish.return_value = success_prov_response
        mock_get_volumes_by_uuid.return_value = prov_volumes_response
        mock_get_targets.side_effect = Exception()
        mock_get_backend_by_id.return_value = entities.ProvisionerResponse(
            [BackendEntity([portal])])
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, vol, conn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_backend_failure(self, mock_host_probe,
                                                   mock_publish,
                                                   mock_get_volumes_by_uuid,
                                                   mock_get_targets,
                                                   mock_get_backend_by_id):
        """A failed get_backend_by_id response aborts initialize_connection."""
        vol = _stub_volume()
        conn = _stub_connector()
        targets = entities.ProvisionerResponse(
            [TargetEntity('target.nqn', prov_backend1)])
        mock_host_probe.return_value = success_prov_response
        mock_publish.return_value = success_prov_response
        mock_get_volumes_by_uuid.return_value = prov_volumes_response
        mock_get_targets.return_value = targets
        mock_get_backend_by_id.return_value = fail_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, vol, conn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_no_backend(self, mock_host_probe,
                                              mock_publish,
                                              mock_get_volumes_by_uuid,
                                              mock_get_targets,
                                              mock_get_backend_by_id):
        """An empty get_backend_by_id result aborts initialize_connection."""
        vol = _stub_volume()
        conn = _stub_connector()
        targets = entities.ProvisionerResponse(
            [TargetEntity('target.nqn', prov_backend1)])
        mock_host_probe.return_value = success_prov_response
        mock_publish.return_value = success_prov_response
        mock_get_volumes_by_uuid.return_value = prov_volumes_response
        mock_get_targets.return_value = targets
        mock_get_backend_by_id.return_value = no_entities_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, vol, conn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'publish')
    @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe')
    def test_initialize_connection_backend_exception(self, mock_host_probe,
                                                     mock_publish,
                                                     mock_get_volumes_by_uuid,
                                                     mock_get_targets,
                                                     mock_get_backend_by_id):
        """An exception from get_backend_by_id is wrapped as a backend error."""
        vol = _stub_volume()
        conn = _stub_connector()
        targets = entities.ProvisionerResponse(
            [TargetEntity('target.nqn', prov_backend1)])
        mock_host_probe.return_value = success_prov_response
        mock_publish.return_value = success_prov_response
        mock_get_volumes_by_uuid.return_value = prov_volumes_response
        mock_get_targets.return_value = targets
        mock_get_backend_by_id.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection, vol, conn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish')
    def test_terminate_connection(self, mock_unpublish):
        """Successful unpublish: terminate_connection returns None."""
        vol = _stub_volume()
        conn = _stub_connector()
        mock_unpublish.return_value = success_prov_response
        self.assertIsNone(self.driver.terminate_connection(vol, conn))
        mock_unpublish.assert_any_call(conn['uuid'], vol['id'])
    @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish')
    def test_terminate_connection_unpublish_failure(self, mock_unpublish):
        """A failed unpublish response raises a backend error."""
        vol = _stub_volume()
        conn = _stub_connector()
        mock_unpublish.return_value = fail_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.terminate_connection, vol, conn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish')
    def test_terminate_connection_unpublish_exception(self, mock_unpublish):
        """An exception from unpublish is wrapped as a backend error."""
        vol = _stub_volume()
        conn = _stub_connector()
        mock_unpublish.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.terminate_connection, vol, conn)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
    def test_get_volume_stats(self, mock_get_tenants):
        """Stats are derived from the first tenant's capacity figures."""
        mock_get_tenants.return_value = entities.ProvisionerResponse(
            [TenantEntity(1000, 400)])
        stats = self.driver.get_volume_stats(True)
        mock_get_tenants.assert_any_call()
        self.assertDictEqual(stats, self.expected_stats)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
    def test_get_volume_stats_tenants_failure(self, mock_get_tenants):
        """A failed tenants query reports capacities as 'unknown'."""
        mock_get_tenants.return_value = fail_prov_response
        for key in ('total_capacity_gb', 'free_capacity_gb'):
            self.expected_stats[key] = 'unknown'
        self.assertDictEqual(
            self.driver.get_volume_stats(True), self.expected_stats)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
    def test_get_volume_stats_no_tenants(self, mock_get_tenants):
        """An empty tenants result reports capacities as 'unknown'."""
        mock_get_tenants.return_value = no_entities_prov_response
        for key in ('total_capacity_gb', 'free_capacity_gb'):
            self.expected_stats[key] = 'unknown'
        self.assertDictEqual(
            self.driver.get_volume_stats(True), self.expected_stats)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants')
    def test_get_volume_stats_tenants_exception(self, mock_get_tenants):
        """An exception from get_tenants reports capacities as 'unknown'."""
        mock_get_tenants.side_effect = Exception()
        for key in ('total_capacity_gb', 'free_capacity_gb'):
            self.expected_stats[key] = 'unknown'
        self.assertDictEqual(
            self.driver.get_volume_stats(True), self.expected_stats)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot')
    def test_create_snapshot_success(self, mock_create_snapshot):
        """The provisioner entity carries the snapshot's alias and ids."""
        snap = _stub_snapshot()
        mock_create_snapshot.return_value = success_prov_response
        self.assertIsNone(self.driver.create_snapshot(snap))
        (entity,), _kwargs = mock_create_snapshot.call_args
        self.assertEqual(entity.alias, snap['name'])
        self.assertEqual(entity.volumeID, snap['volume_id'])
        self.assertEqual(entity.snapshotID, snap['id'])
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot')
    def test_create_snapshot_failure(self, mock_create_snapshot):
        """A failed create_snapshot response raises a backend error."""
        snap = _stub_snapshot()
        mock_create_snapshot.return_value = fail_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_snapshot, snap)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot')
    def test_create_snapshot_exception(self, mock_create_snapshot):
        """An exception from create_snapshot is wrapped as a backend error."""
        snap = _stub_snapshot()
        mock_create_snapshot.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_snapshot, snap)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot')
    def test_delete_snapshot_success(self, mock_delete_snapshot):
        """Successful delete_snapshot passes the snapshot id and returns None."""
        snap = _stub_snapshot()
        mock_delete_snapshot.return_value = success_prov_response
        self.assertIsNone(self.driver.delete_snapshot(snap))
        mock_delete_snapshot.assert_any_call(snap['id'])
    @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot')
    def test_delete_snapshot_failure(self, mock_delete_snapshot):
        """A failed delete_snapshot response raises a backend error."""
        snap = _stub_snapshot()
        mock_delete_snapshot.return_value = fail_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_snapshot, snap)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot')
    def test_delete_snapshot_exception(self, mock_delete_snapshot):
        """An exception from delete_snapshot is wrapped as a backend error."""
        snap = _stub_snapshot()
        mock_delete_snapshot.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_snapshot, snap)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume')
    def test_create_volume_from_snapshot_success(self,
                                                 mock_create_snapshot_volume):
        """The provisioner entity carries alias, ids and the volume protocol."""
        snap = _stub_snapshot()
        vol = _stub_volume()
        mock_create_snapshot_volume.return_value = success_prov_response
        self.assertIsNone(
            self.driver.create_volume_from_snapshot(vol, snap))
        (entity,), _kwargs = mock_create_snapshot_volume.call_args
        self.assertEqual(entity.alias, vol['name'])
        self.assertEqual(entity.volumeID, snap['volume_id'])
        self.assertEqual(entity.snapshotID, snap['id'])
        self.assertEqual(entity.protocol, VOL_PROTOCOL)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume')
    def test_create_volume_from_snapshot_failure(self,
                                                 mock_create_snapshot_volume):
        """A failed create_snapshot_volume response raises a backend error."""
        snap = _stub_snapshot()
        vol = _stub_volume()
        mock_create_snapshot_volume.return_value = fail_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot, vol,
                          snap)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume')
    def test_create_volume_from_snapshot_exception(
            self, mock_create_snapshot_volume):
        """An exception from create_snapshot_volume raises a backend error."""
        snap = _stub_snapshot()
        vol = _stub_volume()
        mock_create_snapshot_volume.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot, vol,
                          snap)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume')
    def test_extend_volume_success(self, mock_expand_volume):
        """expand_volume is invoked with the new size and the volume id."""
        vol = _stub_volume()
        mock_expand_volume.return_value = success_prov_response
        self.assertIsNone(self.driver.extend_volume(vol, VOL_SIZE + 2))
        mock_expand_volume.assert_any_call(VOL_SIZE + 2, vol['id'])
    @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume')
    def test_extend_volume_failure(self, mock_expand_volume):
        """A failed expand_volume response raises a backend error."""
        vol = _stub_volume()
        mock_expand_volume.return_value = fail_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume, vol, VOL_SIZE + 2)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume')
    def test_extend_volume_exception(self, mock_expand_volume):
        """An exception from expand_volume is wrapped as a backend error."""
        vol = _stub_volume()
        mock_expand_volume.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume, vol, VOL_SIZE + 2)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume')
    def test_create_cloned_volume_success(self, mock_clone_volume):
        """The clone entity carries the source volume's alias, size and ids."""
        vol = _stub_volume()
        mock_clone_volume.return_value = success_prov_response
        self.assertIsNone(self.driver.create_cloned_volume(vol, vol))
        (entity,), _kwargs = mock_clone_volume.call_args
        self.assertEqual(entity.alias, vol['name'])
        self.assertEqual(entity.capacity, vol['size'])
        self.assertEqual(entity.volumeId, vol['id'])
        self.assertEqual(entity.sourceVolumeId, vol['id'])
    @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume')
    def test_create_cloned_volume_failure(self, mock_clone_volume):
        """A failed clone_volume response raises a backend error."""
        vol = _stub_volume()
        mock_clone_volume.return_value = fail_prov_response
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume, vol, vol)
    @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume')
    def test_create_cloned_volume_exception(self, mock_clone_volume):
        """An exception from clone_volume is wrapped as a backend error."""
        vol = _stub_volume()
        mock_clone_volume.side_effect = Exception()
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume, vol, vol)
    def test_convert_host_name(self):
        """Host names are converted to a stable md5 hex digest."""
        name = 'ks-node3-000c2960a794-000c2960a797'
        expected = md5(name.encode('utf-8'),
                       usedforsecurity=False).hexdigest()
        self.assertEqual(self.driver._convert_host_name(name), expected)
    def test_create_export(self):
        """create_export is a no-op returning None."""
        self.assertIsNone(self.driver.create_export(None, None, None))
    def test_ensure_export(self):
        """ensure_export is a no-op returning None."""
        self.assertIsNone(self.driver.ensure_export(None, None))
    def test_remove_export(self):
        """remove_export is a no-op returning None."""
        self.assertIsNone(self.driver.remove_export(None, None))
    def test_check_for_setup_error(self):
        """check_for_setup_error succeeds and returns None."""
        self.assertIsNone(self.driver.check_for_setup_error())
def _stub_volume(*args, **kwargs):
    """Build a minimal volume dict; any field is overridable via kwargs."""
    return {
        'id': kwargs.get('id', VOL_UUID),
        'name': kwargs.get('name', VOL_NAME),
        'project_id': "test-project",
        'display_name': kwargs.get('display_name', VOL_NAME),
        'size': kwargs.get('size', VOL_SIZE),
        'provider_location': kwargs.get('provider_location'),
        'volume_type_id': kwargs.get('volume_type_id'),
    }
def _stub_connector(*args, **kwargs):
    """Build a minimal connector dict; any field is overridable via kwargs."""
    return {
        'uuid': kwargs.get('uuid', CONN_UUID),
        'nqn': kwargs.get('nqn', CONN_NQN),
        'host': kwargs.get('host', CONN_HOST_NAME),
    }
def _stub_snapshot(*args, **kwargs):
    """Build a minimal snapshot dict; any field is overridable via kwargs.

    Bug fix: 'volume_id' previously looked up kwargs['id'], so passing an
    ``id`` override silently changed ``volume_id`` too and an explicit
    ``volume_id`` kwarg was ignored. No current caller passes kwargs, so
    this is backward-compatible.
    """
    volume = {'id': kwargs.get('id', SNAP_UUID),
              'name': kwargs.get('name', 'snap2000'),
              'volume_id': kwargs.get('volume_id', VOL_UUID)}
    return volume
class TenantEntity:
    """Stub provisioner tenant carrying capacity accounting figures."""

    def __init__(self, capacity, consumed):
        # Single-tenant tests always use tenant id '0'.
        self.tenantId = '0'
        self.capacity = capacity
        self.consumedCapacity = consumed
class TargetEntity:
    """Stub provisioner target: an NQN name bound to a backend."""

    def __init__(self, name, backend):
        self.targetName = name
        self.backend = backend
class BackendEntity:
    """Stub provisioner backend: just a list of portals."""

    def __init__(self, portals):
        self.portals = portals
class PortalEntity:
    """Stub NVMe portal: ip/port/transport triple."""

    def __init__(self, ip, port, transport):
        self.ip = ip
        self.port = port
        self.transport = transport
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
 | 
	apache-2.0 | -6,538,985,573,473,255,000 | 51.269531 | 79 | 0.628702 | false | 
| 
	c-o-m-m-a-n-d-e-r/CouchPotatoServer | 
	libs/caper/result.py | 
	81 | 
	5904 | 
	# Copyright 2013 Dean Gardiner <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from logr import Logr
GROUP_MATCHES = ['identifier']
class CaperNode(object):
    """Base node of the parse tree built while matching closures."""

    def __init__(self, closure, parent=None, match=None):
        """
        :type closure: caper.objects.CaperClosure
        :type parent: CaperNode
        :type match: CaptureMatch
        """
        self.closure = closure
        self.parent = parent
        self.match = match

        # Capture groups that have finished processing at this node.
        self.finished_groups = []

    def next(self):
        """Return the next object to parse; subclasses must implement."""
        raise NotImplementedError()

    def captured(self):
        """Yield (tag, result) pairs from this node up through its ancestors."""
        node = self
        if node.match:
            yield node.match.tag, node.match.result
        while node.parent:
            node = node.parent
            if node.match:
                yield node.match.tag, node.match.result
class CaperRootNode(CaperNode):
    """Synthetic root node; parsing starts at its closure."""

    def __init__(self, closure):
        """
        :type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure
        """
        super(CaperRootNode, self).__init__(closure)

    def next(self):
        """The root simply hands back its closure to start iteration."""
        return self.closure
class CaperClosureNode(CaperNode):
    """Node positioned at a closure boundary."""

    def __init__(self, closure, parent=None, match=None):
        """
        :type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure
        """
        super(CaperClosureNode, self).__init__(closure, parent, match)

    def next(self):
        """Return the next closure or fragment to parse, or None."""
        if not self.closure:
            return None

        if self.match:
            # This closure is consumed by a match; continue to the right.
            return self.closure.right

        if self.closure.fragments:
            # No match yet: descend into the closure's first fragment.
            return self.closure.fragments[0]

        return None

    def __str__(self):
        return "<CaperClosureNode match: %s>" % repr(self.match)

    def __repr__(self):
        return self.__str__()
class CaperFragmentNode(CaperNode):
    """Node covering one or more fragments inside a closure."""

    def __init__(self, closure, fragments, parent=None, match=None):
        """
        :type closure: caper.objects.CaperClosure
        :type fragments: list of caper.objects.CaperFragment
        """
        super(CaperFragmentNode, self).__init__(closure, parent, match)

        #: :type: caper.objects.CaperFragment or list of caper.objects.CaperFragment
        self.fragments = fragments

    def next(self):
        """Prefer the fragment after our span; fall back to the next closure."""
        fragments = self.fragments
        if fragments and fragments[-1] and fragments[-1].right:
            return fragments[-1].right

        if self.closure.right:
            return self.closure.right

        return None

    def __str__(self):
        return "<CaperFragmentNode match: %s>" % repr(self.match)

    def __repr__(self):
        return self.__str__()
class CaperResult(object):
    """Collects head nodes and combines them into weighted result chains."""

    def __init__(self):
        #: :type: list of CaperNode
        self.heads = []

        # Finished CaperResultChain objects; sorted by weight after build().
        self.chains = []

    def build(self):
        """Expand every head into chains, normalize weights and sort them."""
        max_matched = 0

        for head in self.heads:
            for chain in self.combine_chain(head):
                if chain.num_matched > max_matched:
                    max_matched = chain.num_matched

                self.chains.append(chain)

        for chain in self.chains:
            # Normalize by the best chain's match count; the chained `or`
            # fallbacks avoid a zero division when nothing matched at all.
            chain.weights.append(chain.num_matched / float(max_matched or chain.num_matched or 1))
            chain.finish()

        # Best (highest weight) chain first.
        self.chains.sort(key=lambda chain: chain.weight, reverse=True)

        for chain in self.chains:
            Logr.debug("chain weight: %.02f", chain.weight)
            Logr.debug("\tInfo: %s", chain.info)
            Logr.debug("\tWeights: %s", chain.weights)
            Logr.debug("\tNumber of Fragments Matched: %s", chain.num_matched)

    def combine_chain(self, subject, chain=None):
        """Recursively walk from *subject* to the root, producing one chain
        per branch encountered along the way."""
        nodes = subject if type(subject) is list else [subject]

        if chain is None:
            chain = CaperResultChain()

        result = []

        for x, node in enumerate(nodes):
            # The last node keeps the current chain; earlier siblings get a
            # copy so branches do not share mutable state.
            node_chain = chain if x == len(nodes) - 1 else chain.copy()

            if not node.parent:
                # Reached a root: this chain is complete.
                result.append(node_chain)
                continue

            node_chain.update(node)
            result.extend(self.combine_chain(node.parent, node_chain))

        return result
class CaperResultChain(object):
    """One weighted chain of matches assembled from a head node."""

    def __init__(self):
        #: :type: float
        self.weight = None
        self.info = {}
        self.num_matched = 0
        self.weights = []

    def update(self, subject):
        """
        :type subject: CaperFragmentNode
        """
        match = subject.match
        if not match or not match.success:
            return

        # TODO this should support closure nodes
        if type(subject) is CaperFragmentNode:
            if subject.fragments is not None:
                self.num_matched += len(subject.fragments)

        self.weights.append(match.weight)

        # Newest results are pushed to the front of the tag's list.
        self.info.setdefault(match.tag, []).insert(0, match.result)

    def finish(self):
        """Collapse the accumulated weights into their average."""
        self.weight = sum(self.weights) / len(self.weights)

    def copy(self):
        """Return a copy sharing no mutable state with this chain."""
        clone = CaperResultChain()
        clone.weight = self.weight
        clone.info = copy.deepcopy(self.info)
        clone.num_matched = self.num_matched
        clone.weights = list(self.weights)
        return clone
	gpl-3.0 | -1,945,484,610,735,691,800 | 26.723005 | 98 | 0.598747 | false | 
| 
	ToBeReplaced/ansible-modules-extras | 
	notification/hall.py | 
	142 | 
	3619 | 
	#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Billy Kimble <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
# Fix user-facing typos in the module documentation strings:
# "notication"/"notifcation"/"notifiation" -> "notification",
# "integation" -> "integration".
DOCUMENTATION = """
module: hall
short_description: Send notification to Hall
description:
    - "The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notification messages to rooms."
version_added: "2.0"
author: Billy Kimble (@bkimble) <[email protected]>
options:
  room_token:
    description:
      - "Room token provided to you by setting up the Ansible room integration on U(https://hall.com)"
    required: true
  msg:
    description:
      - The message you wish to deliver as a notification
    required: true
  title:
    description:
      - The title of the message
    required: true
  picture:
    description:
      - "The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)"
    required: false
"""
EXAMPLES = """
- name: Send Hall notification
  local_action:
    module: hall
    room_token: <hall room integration token>
    title: Nginx
    msg: Created virtual host file on {{ inventory_hostname }}
- name: Send Hall notification if EC2 servers were created.
  when: ec2.instances|length > 0
  local_action:
    module: hall
    room_token: <hall room integration token>
    title: Server Creation
    msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region."
  with_items: ec2.instances
"""
# Template for the Hall generic-service API URL; %s is the room token.
HALL_API_ENDPOINT  = 'https://hall.com/api/1/services/generic/%s'
def send_request_to_hall(module, room_token, payload):
    """POST the JSON payload to the Hall room endpoint; fail on non-200."""
    payload = module.jsonify(payload)
    api_endpoint = HALL_API_ENDPOINT % (room_token)
    response, info = fetch_url(
        module,
        api_endpoint,
        data=payload,
        headers={'Content-Type': 'application/json'},
    )
    if info['status'] != 200:
        # Never leak the real room token into failure output.
        secure_url = HALL_API_ENDPOINT % ('[redacted]')
        module.fail_json(
            msg=" failed to send %s to %s: %s" % (payload, secure_url,
                                                  info['msg']))
def main():
    """Ansible entry point: read the parameters and deliver the notification."""
    module = AnsibleModule(
        argument_spec=dict(
            room_token=dict(type='str', required=True),
            msg=dict(type='str', required=True),
            title=dict(type='str', required=True),
            picture=dict(type='str', default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'),
        )
    )
    params = module.params
    payload = {
        'title': params['title'],
        'message': params['msg'],
        'picture': params['picture'],
    }
    send_request_to_hall(module, params['room_token'], payload)
    module.exit_json(msg="OK")
# Ansible modules of this era import the shared utils at the bottom of the
# file and invoke main() unconditionally when the module is executed.
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
 | 
	gpl-3.0 | -5,157,466,764,462,421,000 | 36.309278 | 216 | 0.685548 | false | 
| 
	misdoro/python-ase | 
	ase/calculators/jacapo/utils/bader.py | 
	2 | 
	6745 | 
	from __future__ import print_function
import os, string, tempfile, shutil
from subprocess import Popen
from ase.io import write
from ase.units import Bohr
class Bader:
    '''class for running bader analysis and extracting data from it.

    The class runs bader, extracts the charge density and outputs it
    to a cube file. Then you call different functions of the class to
    extract the charges, volumes, etc...

    ACF.dat contains the coordinates of each atom, the charge
    associated with it according to Bader partitioning, percentage of
    the whole according to Bader partitioning and the minimum distance
    to the surface. This distance should be compared to maximum
    cut-off radius for the core region if pseudo potentials have been
    used.

    BCF.dat contains the coordinates of each Bader maxima, the charge
    within that volume, the nearest atom and the distance to that
    atom.

    AtomVolumes.dat contains the number of each volume that has been
    assigned to each atom. These numbers correspond to the number of
    the BvAtxxxx.dat files.

    The options for the executable are::

        bader [ -c bader | voronoi ]
              [ -n bader | voronoi ]
              [ -b neargrid | ongrid ]
              [ -r refine_edge_iterations ]
              [ -ref reference_charge ]
              [ -p all_atom | all_bader ]
              [ -p sel_atom | sel_bader ] [volume list]
              [ -p atom_index | bader_index ]
              [ -i cube | chgcar ]
              [ -h ] [ -v ]
              chargefile

    References:

    G. Henkelman, A. Arnaldsson, and H. Jonsson, A fast and robust
    algorithm for Bader decomposition of charge density,
    Comput. Mater. Sci. 36 254-360 (2006).

    E. Sanville, S. D. Kenny, R. Smith, and G. Henkelman An improved
    grid-based algorithm for Bader charge allocation,
    J. Comp. Chem. 28 899-908 (2007).

    W. Tang, E. Sanville, and G. Henkelman A grid-based Bader analysis
    algorithm without lattice bias, J. Phys.: Condens. Matter 21
    084204 (2009).
    '''
    def __init__(self, atoms):
        '''Run bader on the charge density of *atoms* and parse ACF.dat.

        atoms must carry a Jacapo-style calculator providing get_nc()
        and get_charge_density().  Populates self.charges and
        self.volumes (volumes in Bohr**3), one entry per atom.
        '''
        self.atoms = atoms

        # Get the density from the calculator and dump it to a cube file;
        # bader reads the density from that file (values in Bohr**3 units).
        calc = atoms.get_calculator()
        ncfile = calc.get_nc()
        base, ext = os.path.splitext(ncfile)
        x, y, z, density = calc.get_charge_density()
        cubefile = base + '_charge_density.cube'
        self.densityfile = cubefile
        if not os.path.exists(cubefile):
            write(cubefile, atoms, data=density * Bohr ** 3)

        # Run bader in a scratch directory so its many output files do not
        # clutter the working directory; keep only ACF.dat.  Skip the run
        # entirely if a previous result already exists.
        acf_file = base + '_ACF.dat'
        if not os.path.exists(acf_file):
            tempdir = tempfile.mkdtemp()
            cwd = os.getcwd()
            abscubefile = os.path.abspath(cubefile)
            os.chdir(tempdir)
            try:
                # Pass argv as a list: the previous single-string form
                # 'bader <file>' made Popen look for an executable whose
                # name was that entire string.
                process = Popen(['bader', abscubefile])
                # wait() must be called on the process instance; the old
                # unbound Popen.wait() call raised a TypeError.
                status = process.wait()
                if status != 0:
                    print(process)
                shutil.copy2('ACF.dat', os.path.join(cwd, acf_file))
            finally:
                # Always restore the cwd and remove the scratch directory,
                # even if bader or the copy above failed.
                os.chdir(cwd)
                shutil.rmtree(tempdir)

        # Parse ACF.dat: two header lines, then one line per atom with
        # columns (index, x, y, z, charge, min dist, volume).
        self.charges = []
        self.volumes = []
        f = open(acf_file, 'r')
        f.readline()
        f.readline()
        for atom in self.atoms:
            fields = f.readline().split()
            self.charges.append(float(fields[4]))
            self.volumes.append(float(fields[6]))
        f.close()

    def get_bader_charges(self):
        'Return the list of Bader charges, one per atom.'
        return self.charges

    def get_bader_volumes(self):
        'return volumes in Ang**3'
        return [x * Bohr ** 3 for x in self.volumes]

    def write_atom_volume(self, atomlist):
        '''write bader atom volumes to cube files.

        atomlist = [0,2] #for example

        -p sel_atom Write the selected atomic volumes, read from the
        subsequent list of volumes.
        '''
        # ' '.join works on both Python 2 and 3 (string.join does not).
        alist = ' '.join([str(x) for x in atomlist])
        cmd = 'bader -p sel_atom %s %s' % (alist, self.densityfile)
        print(cmd)
        os.system(cmd)

    def write_bader_volume(self, atomlist):
        """write bader atom volumes to cube files.
        ::

          atomlist = [0,2] #  for example

        -p sel_bader Write the selected Bader volumes, read from the
        subsequent list of volumes.
        """
        alist = ' '.join([str(x) for x in atomlist])
        cmd = 'bader -p sel_bader %s %s' % (alist, self.densityfile)
        print(cmd)
        os.system(cmd)

    def write_atom_index(self):
        ''' -p atom_index Write the atomic volume index to a charge
        density file.
        '''
        cmd = 'bader -p atom_index %s' % (self.densityfile)
        print(cmd)
        os.system(cmd)

    def write_bader_index(self):
        '''
        -p bader_index Write the Bader volume index to a charge
        density file.
        '''
        cmd = 'bader -p bader_index %s' % (self.densityfile)
        print(cmd)
        os.system(cmd)

    def write_all_atom(self):
        '''
        -p all_atom Combine all volumes associated with an atom and
        write to file. This is done for all atoms and written to files
        named BvAtxxxx.dat. The volumes associated with atoms are
        those for which the maximum in charge density within the
        volume is closest to the atom.
        '''
        cmd = 'bader -p all_atom %s' % (self.densityfile)
        print(cmd)
        os.system(cmd)

    def write_all_bader(self):
        '''
        -p all_bader Write all Bader volumes (containing charge above
        threshold of 0.0001) to a file. The charge distribution in
        each volume is written to a separate file, named
        Bvolxxxx.dat. It will either be of a CHGCAR format or a CUBE
        file format, depending on the format of the initial charge
        density file. These files can be quite large, so this option
        should be used with caution.
        '''
        cmd = 'bader -p all_bader %s' % (self.densityfile)
        print(cmd)
        os.system(cmd)
        
# Manual smoke test: needs a finished Jacapo calculation ('ethylene.nc')
# and the 'bader' executable on PATH, so it only runs as a script.
if __name__ == '__main__':
    from ase.calculators.jacapo import Jacapo
    atoms = Jacapo.read_atoms('ethylene.nc')
    b = Bader(atoms)
    print(b.get_bader_charges())
    print(b.get_bader_volumes())
    b.write_atom_volume([3, 4])
 | 
	gpl-2.0 | -2,498,351,350,100,636,700 | 30.966825 | 70 | 0.577613 | false | 
| 
	playm2mboy/edx-platform | 
	lms/djangoapps/open_ended_grading/staff_grading_service.py | 
	64 | 
	16269 | 
	"""
This module provides views that proxy to the staff grading backend service.
"""
import json
import logging
from django.conf import settings
from django.http import HttpResponse, Http404
from django.utils.translation import ugettext as _
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.open_ended_grading_classes.grading_service_module import GradingService, GradingServiceError
from courseware.access import has_access
from edxmako.shortcuts import render_to_string
from student.models import unique_id_for_user
from open_ended_grading.utils import does_location_exist
import dogstats_wrapper as dog_stats_api
log = logging.getLogger(__name__)
# Staff-facing message shown when the external ORA grading service is
# unreachable.  Only the .format() argument is fixed here; the wrapped
# string stays byte-identical so existing translations are unaffected.
STAFF_ERROR_MESSAGE = _(
    u'Could not contact the external grading server. Please contact the '
    u'development team at {email}.'
).format(
    # The href attribute was missing its closing quote, which rendered
    # broken markup; '...{tech_support_email}">' closes it properly.
    email=u'<a href="mailto:{tech_support_email}">{tech_support_email}</a>'.format(
        tech_support_email=settings.TECH_SUPPORT_EMAIL
    )
)

# Upper bound on grader feedback length, enforced by check_feedback_length().
MAX_ALLOWED_FEEDBACK_LENGTH = 5000
class MockStaffGradingService(object):
    """
    A simple mockup of a staff grading service, testing.
    """
    def __init__(self):
        # Monotonic counter: bumped on every fetch so each call looks new.
        self.cnt = 0

    def get_next(self, course_id, location, grader_id):
        """Return a fake 'next submission' payload; counter drives the ids."""
        self.cnt += 1
        response = {
            'success': True,
            'submission_id': self.cnt,
            'submission': 'Test submission {cnt}'.format(cnt=self.cnt),
            'num_graded': 3,
            'min_for_ml': 5,
            'num_pending': 4,
            'prompt': 'This is a fake prompt',
            'ml_error_info': 'ML info',
            'max_score': 2 + self.cnt % 3,
            'rubric': 'A rubric',
        }
        return response

    def get_problem_list(self, course_id, grader_id):
        """Return two canned problems, serialized the way ORA would send them."""
        self.cnt += 1
        problems = [
            {
                'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
                'problem_name': "Problem 1",
                'num_graded': 3,
                'num_pending': 5,
                'min_for_ml': 10,
            },
            {
                'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
                'problem_name': "Problem 2",
                'num_graded': 1,
                'num_pending': 5,
                'min_for_ml': 10,
            },
        ]
        return {
            'success': True,
            'problem_list': [json.dumps(problem) for problem in problems],
        }

    def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
                   submission_flagged):
        """Pretend the grade was saved and hand back the next submission."""
        return self.get_next(course_id, 'fake location', grader_id)
class StaffGradingService(GradingService):
    """
    Interface to staff grading backend.
    """
    METRIC_NAME = 'edxapp.open_ended_grading.staff_grading_service'

    def __init__(self, config):
        config['render_template'] = render_to_string
        super(StaffGradingService, self).__init__(config)
        # All endpoints hang off the staff-grading root of the service.
        base_url = config['url'] + config['staff_grading']
        self.url = base_url
        self.login_url = base_url + '/login/'
        self.get_next_url = base_url + '/get_next_submission/'
        self.save_grade_url = base_url + '/save_grade/'
        self.get_problem_list_url = base_url + '/get_problem_list/'
        self.get_notifications_url = base_url + "/get_notifications/"

    def get_problem_list(self, course_id, grader_id):
        """
        Get the list of problems for a given course.

        Args:
            course_id: course id that we want the problems of
            grader_id: who is grading this?  The anonymous user_id of the grader.

        Returns:
            dict with the response from the service.  (Deliberately not
            writing out the fields here--see the docs on the staff_grading view
            in the grading_controller repo)

        Raises:
            GradingServiceError: something went wrong with the connection.
        """
        result = self.get(
            self.get_problem_list_url,
            {
                'course_id': course_id.to_deprecated_string(),
                'grader_id': grader_id,
            },
        )
        self._record_result(
            'get_problem_list', result, [u'course_id:{}'.format(course_id)]
        )
        # Record how many problems the service reported for this course.
        dog_stats_api.histogram(
            self._metric_name('get_problem_list.result.length'),
            len(result.get('problem_list', [])),
        )
        return result

    def get_next(self, course_id, location, grader_id):
        """
        Get the next thing to grade.

        Args:
            course_id: the course that this problem belongs to
            location: location of the problem that we are grading and would like the
                next submission for
            grader_id: who is grading this?  The anonymous user_id of the grader.

        Returns:
            dict with the response from the service.  (Deliberately not
            writing out the fields here--see the docs on the staff_grading view
            in the grading_controller repo)

        Raises:
            GradingServiceError: something went wrong with the connection.
        """
        raw = self.get(
            self.get_next_url,
            params={
                'location': location.to_deprecated_string(),
                'grader_id': grader_id,
            },
        )
        result = self._render_rubric(raw)
        self._record_result('get_next', result, [u'course_id:{}'.format(course_id)])
        return result

    def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,
                   submission_flagged):
        """
        Save a score and feedback for a submission.

        Returns:
            dict with keys
                'success': bool
                'error': error msg, if something went wrong.

        Raises:
            GradingServiceError if there's a problem connecting.
        """
        payload = {
            'course_id': course_id.to_deprecated_string(),
            'submission_id': submission_id,
            'score': score,
            'feedback': feedback,
            'grader_id': grader_id,
            'skipped': skipped,
            'rubric_scores': rubric_scores,
            'rubric_scores_complete': True,
            'submission_flagged': submission_flagged,
        }
        result = self._render_rubric(self.post(self.save_grade_url, data=payload))
        self._record_result('save_grade', result, [u'course_id:{}'.format(course_id)])
        return result

    def get_notifications(self, course_id):
        """Ask the backend whether staff attention is needed for course_id."""
        result = self.get(
            self.get_notifications_url,
            {'course_id': course_id.to_deprecated_string()},
        )
        self._record_result(
            'get_notifications',
            result,
            [
                u'course_id:{}'.format(course_id),
                u'staff_needs_to_grade:{}'.format(result.get('staff_needs_to_grade')),
            ],
        )
        return result
# don't initialize until staff_grading_service() is called--means that just
# importing this file doesn't create objects that may not have the right config
_service = None


def staff_grading_service():
    """
    Return a staff grading service instance--if settings.MOCK_STAFF_GRADING is True,
    returns a mock one, otherwise a real one.

    Caches the result, so changing the setting after the first call to this
    function will have no effect.
    """
    global _service
    if _service is None:
        if settings.MOCK_STAFF_GRADING:
            _service = MockStaffGradingService()
        else:
            _service = StaffGradingService(settings.OPEN_ENDED_GRADING_INTERFACE)
    return _service
def _err_response(msg):
    """
    Return a HttpResponse with a json dump with success=False, and the given error message.
    """
    body = json.dumps({'success': False, 'error': msg})
    return HttpResponse(body, mimetype="application/json")
def _check_access(user, course_id):
    """
    Raise 404 if user doesn't have staff access to course_id
    """
    if has_access(user, 'staff', course_id):
        return
    raise Http404
def get_next(request, course_id):
    """
    Get the next thing to grade for course_id and with the location specified
    in the request.

    Returns a json dict with the following keys:
    'success': bool
    'submission_id': a unique identifier for the submission, to be passed back
                     with the grade.
    'submission': the submission, rendered as read-only html for grading
    'rubric': the rubric, also rendered as html.
    'message': if there was no submission available, but nothing went wrong,
            there will be a message field.
    'error': if success is False, will have an error message with more info.
    """
    assert isinstance(course_id, basestring)
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    _check_access(request.user, course_key)

    if request.method != 'POST':
        raise Http404

    post_data = request.POST
    missing = set(['location']) - set(post_data.keys())
    if missing:
        return _err_response('Missing required keys {0}'.format(', '.join(missing)))

    grader_id = unique_id_for_user(request.user)
    location = course_key.make_usage_key_from_deprecated_string(post_data['location'])
    payload = json.dumps(_get_next(course_key, grader_id, location))
    return HttpResponse(payload, mimetype="application/json")
def get_problem_list(request, course_id):
    """
    Get all the problems for the given course id.

    Returns a json dict with the following keys:
        success: bool
        problem_list: a list containing json dicts with the following keys:
            each dict represents a different problem in the course
            location: the location of the problem
            problem_name: the name of the problem
            num_graded: the number of responses that have been graded
            num_pending: the number of responses that are sitting in the queue
            min_for_ml: the number of responses that need to be graded before
                the ml can be run
        'error': if success is False, will have an error message with more info.
    """
    assert isinstance(course_id, basestring)
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    _check_access(request.user, course_key)
    try:
        response = staff_grading_service().get_problem_list(course_key, unique_id_for_user(request.user))

        # If 'problem_list' is in the response, then we got a list of problems from the ORA server.
        # If it is not, then ORA could not find any problems.
        if 'problem_list' in response:
            problem_list = response['problem_list']
        else:
            problem_list = []
            # Make an error message to reflect that we could not find anything to grade.
            response['error'] = _(
                u'Cannot find any open response problems in this course. '
                u'Have you submitted answers to any open response assessment questions? '
                u'If not, please do so and return to this page.'
            )

        # Keep only the problems whose location still exists in the course.
        valid_problem_list = []
        for problem in problem_list:
            # Entries may arrive as JSON strings; decode them so the
            # 'location' key can be accessed.  (Previously this was an
            # index loop over xrange(len(...)) with a blanket
            # 'except Exception: pass' around the decode.)
            if isinstance(problem, basestring):
                try:
                    problem = json.loads(problem)
                except ValueError:
                    pass
            if does_location_exist(course_key.make_usage_key_from_deprecated_string(problem['location'])):
                valid_problem_list.append(problem)
        response['problem_list'] = valid_problem_list

        return HttpResponse(json.dumps(response),
                            mimetype="application/json")
    except GradingServiceError:
        #This is a dev_facing_error
        log.exception(
            "Error from staff grading service in open "
            "ended grading.  server url: {0}".format(staff_grading_service().url)
        )
        #This is a staff_facing_error
        return HttpResponse(json.dumps({'success': False,
                                        'error': STAFF_ERROR_MESSAGE}))
def _get_next(course_id, grader_id, location):
    """
    Implementation of get_next (also called from save_grade) -- returns a json string
    """
    service = staff_grading_service()
    try:
        return service.get_next(course_id, location, grader_id)
    except GradingServiceError:
        # This is a dev facing error
        log.exception(
            "Error from staff grading service in open "
            "ended grading.  server url: {0}".format(service.url)
        )
        # This is a staff_facing_error
        return json.dumps({'success': False, 'error': STAFF_ERROR_MESSAGE})
def save_grade(request, course_id):
    """
    Save the grade and feedback for a submission, and, if all goes well, return
    the next thing to grade.

    Expects the following POST parameters:
    'score': int
    'feedback': string
    'submission_id': int

    Returns the same thing as get_next, except that additional error messages
    are possible if something goes wrong with saving the grade.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    _check_access(request.user, course_key)

    if request.method != 'POST':
        raise Http404
    p = request.POST
    required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged'])
    skipped = 'skipped' in p
    #If the instructor has skipped grading the submission, then there will not be any rubric scores.
    #Only add in the rubric scores if the instructor has not skipped.
    if not skipped:
        required.add('rubric_scores[]')
    actual = set(p.keys())
    missing = required - actual
    if len(missing) > 0:
        return _err_response('Missing required keys {0}'.format(
            ', '.join(missing)))

    success, message = check_feedback_length(p)
    if not success:
        return _err_response(message)

    grader_id = unique_id_for_user(request.user)
    location = course_key.make_usage_key_from_deprecated_string(p['location'])

    try:
        result = staff_grading_service().save_grade(course_key,
                                                    grader_id,
                                                    p['submission_id'],
                                                    p['score'],
                                                    p['feedback'],
                                                    skipped,
                                                    p.getlist('rubric_scores[]'),
                                                    p['submission_flagged'])
    except GradingServiceError:
        #This is a dev_facing_error
        log.exception(
            "Error saving grade in the staff grading interface in open ended grading.  Request: {0} Course ID: {1}".format(
                request, course_id))
        #This is a staff_facing_error
        return _err_response(STAFF_ERROR_MESSAGE)
    except ValueError:
        # This is a dev_facing_error.  `result` is not bound when the call
        # itself raised, so log the request instead.  (The old code
        # formatted an undefined `result_json`, which raised a NameError
        # while handling the original error.)
        log.exception(
            "save_grade returned broken json in the staff grading interface in open ended grading.  Request: {0}".format(
                request))
        #This is a staff_facing_error
        return _err_response(STAFF_ERROR_MESSAGE)

    if not result.get('success', False):
        # This is a dev_facing_error (previously formatted the undefined
        # `result_json`, raising a NameError instead of logging).
        log.warning(
            'Got success=False from staff grading service in open ended grading.  Response: {0}'.format(result))
        return _err_response(STAFF_ERROR_MESSAGE)

    # Ok, save_grade seemed to work.  Get the next submission to grade.
    return HttpResponse(json.dumps(_get_next(course_id, grader_id, location)),
                        mimetype="application/json")
def check_feedback_length(data):
    """Validate that grader feedback fits within MAX_ALLOWED_FEEDBACK_LENGTH.

    Returns (True, "") when acceptable, otherwise (False, <error message>).
    """
    feedback = data.get("feedback")
    if not feedback or len(feedback) <= MAX_ALLOWED_FEEDBACK_LENGTH:
        return True, ""
    return False, "Feedback is too long, Max length is {0} characters.".format(
        MAX_ALLOWED_FEEDBACK_LENGTH
    )
 | 
	agpl-3.0 | -2,235,410,977,889,313,800 | 35.559551 | 123 | 0.595427 | false | 
| 
	batxes/4c2vhic | 
	Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/Six_zebra_models29901.py | 
	2 | 
	13923 | 
	import _surface
import chimera
try:
  import chimera.runCommand
except:
  pass
from VolumePath import markerset as ms
# Older Chimera releases expose marker-set creation through the volume
# path dialog rather than the VolumePath module API; fall back accordingly.
try:
  from VolumePath import Marker_Set, Link
  new_marker_set=Marker_Set
except:
  from VolumePath import volume_path_dialog
  d= volume_path_dialog(True)
  new_marker_set= d.new_marker_set
# name -> set registries reused by the marker-placement code below
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
  s=new_marker_set('particle_0 geometry')
  marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((14969.5, 9355.22, 4884.95), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
  s=new_marker_set('particle_1 geometry')
  marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((15857.7, 8984.62, 4617.48), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
  s=new_marker_set('particle_2 geometry')
  marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((14048.8, 8305.7, 4500.15), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
  s=new_marker_set('particle_3 geometry')
  marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((11860.6, 7514.14, 4365.92), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
  s=new_marker_set('particle_4 geometry')
  marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((11168.8, 7253.91, 4353.22), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
  s=new_marker_set('particle_5 geometry')
  marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((12710.6, 5841.18, 3924.41), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
  s=new_marker_set('particle_6 geometry')
  marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((11163.9, 4708.08, 4115.8), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
  s=new_marker_set('particle_7 geometry')
  marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((11220.3, 3116.23, 3445.88), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
  s=new_marker_set('particle_8 geometry')
  marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((10024.2, 2278.2, 2811.32), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
  s=new_marker_set('particle_9 geometry')
  marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((8565.6, 1179.39, 2598.93), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
  s=new_marker_set('particle_10 geometry')
  marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((7241.9, 1843.25, 1632.03), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
  s=new_marker_set('particle_11 geometry')
  marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((7011.72, 1012.49, -305.89), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
  s=new_marker_set('particle_12 geometry')
  marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((6903.61, 98.5444, -2158.28), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
  s=new_marker_set('particle_13 geometry')
  marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((8201.9, 1002.62, -1834.4), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
  s=new_marker_set('particle_14 geometry')
  marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((7414.09, -129.02, -854.322), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
  s=new_marker_set('particle_15 geometry')
  marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((7122.12, -299.946, 714.323), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
  s=new_marker_set('particle_16 geometry')
  marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((7708.18, 230.276, 1947.9), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
  s=new_marker_set('particle_17 geometry')
  marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((8140.74, 861.511, 3347.95), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
  s=new_marker_set('particle_18 geometry')
  marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((9916.15, 691.375, 3641.12), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
  s=new_marker_set('particle_19 geometry')
  marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((10028.7, 610.495, 5184.02), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
  s=new_marker_set('particle_20 geometry')
  marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((9947.99, 101.655, 6937.75), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
  s=new_marker_set('particle_21 geometry')
  marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((10141.9, -1122.2, 8124.17), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
  s=new_marker_set('particle_22 geometry')
  marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((9427.43, 62.891, 7740), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
  s=new_marker_set('particle_23 geometry')
  marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((7792.4, 1113.02, 6928.08), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
  s=new_marker_set('particle_24 geometry')
  marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((5839.87, 1426.25, 6096.11), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
  s=new_marker_set('particle_25 geometry')
  marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((4866.74, 1500.33, 5695.34), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
  s=new_marker_set('particle_26 geometry')
  marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((4653.24, 3936.53, 6840.6), (0.7, 0.7, 0.7), 797.863)
# Data-driven form of the generated marker list: one entry per particle as
# (index, xyz position, rgb colour, radius).  Replaces the unrolled
# if/place_marker sequence with a single loop; the marker-set keys, the calls
# made, and the final bindings of `s` and `mark` are identical.
_particle_data = [
    (27, (3660.54, 5240.38, 7746.49), (1, 0.7, 0), 735.682),
    (28, (4273.05, 5438.27, 8823.37), (0.7, 0.7, 0.7), 602.14),
    (29, (4960.15, 5382.51, 11055.5), (0.7, 0.7, 0.7), 954.796),
    (30, (4538.04, 5433.08, 10570.7), (0.7, 0.7, 0.7), 1021.88),
    (31, (4142.42, 6699.7, 10521.4), (0.7, 0.7, 0.7), 909.323),
    (32, (3794.77, 8574.69, 11766.8), (0.7, 0.7, 0.7), 621.049),
    (33, (4229.33, 9771.99, 11046.9), (0.7, 0.7, 0.7), 525.154),
    (34, (5420.81, 10555.7, 10510.1), (0.7, 0.7, 0.7), 890.246),
    (35, (6615.4, 11834.4, 10784.8), (0.7, 0.7, 0.7), 671.216),
    (36, (8123.43, 12065.3, 11499), (0.7, 0.7, 0.7), 662.672),
    (37, (8008.57, 10546.9, 12037.1), (0.7, 0.7, 0.7), 646.682),
    (38, (6588.29, 10507.4, 12644.8), (0.7, 0.7, 0.7), 769.945),
    (39, (5333.97, 9838.61, 11243.6), (0.7, 0.7, 0.7), 606.92),
    (40, (4610.87, 10843.4, 11069.5), (0.7, 0.7, 0.7), 622.571),
    (41, (5113.26, 9718.7, 10484.3), (0.7, 0.7, 0.7), 466.865),
    (42, (5912.2, 10033, 10071.9), (0.7, 0.7, 0.7), 682.933),
    (43, (5196.91, 9912.15, 10527.5), (0.7, 0.7, 0.7), 809.326),
    (44, (4146.77, 8424.44, 10674.7), (0.7, 0.7, 0.7), 796.72),
    (45, (3517.66, 6984.69, 8251.86), (0.7, 0.7, 0.7), 870.026),
    (46, (2724.62, 7296.95, 6580.21), (0.7, 0.7, 0.7), 909.577),
    (47, (2710.77, 7987.75, 5648.79), (0, 1, 0), 500.536),
    (48, (1852.63, 9762.8, 5359.63), (0.7, 0.7, 0.7), 725.276),
    (49, (41.5662, 11727.3, 5413.74), (0.7, 0.7, 0.7), 570.331),
    (50, (412.316, 12008.6, 7020.04), (0.7, 0.7, 0.7), 492.203),
    (51, (304.441, 9258.71, 7961.56), (0, 1, 0), 547.7),
    (52, (1047.31, 9521.91, 7963.95), (0.7, 0.7, 0.7), 581.921),
    (53, (1974.71, 10863.2, 8973.95), (0.7, 0.7, 0.7), 555.314),
    (54, (3220.08, 11733.6, 9246.88), (0.7, 0.7, 0.7), 404.219),
    (55, (4736.09, 11001.1, 8495.51), (0.7, 0.7, 0.7), 764.234),
]
for _num, _xyz, _rgb, _radius in _particle_data:
    _key = 'particle_%d geometry' % _num
    # Create the named marker set on first use, then reuse it.
    if _key not in marker_sets:
        s = new_marker_set(_key)
        marker_sets[_key] = s
    s = marker_sets[_key]
    mark = s.place_marker(_xyz, _rgb, _radius)
# Register every accumulated surface set with the Chimera model list.
for k in surf_sets.keys():
  chimera.openModels.add([surf_sets[k]])
 | 
	gpl-3.0 | -4,076,574,980,432,056,300 | 45.721477 | 75 | 0.699777 | false | 
| 
	aostapenko/manila | 
	manila/scheduler/chance.py | 
	2 | 
	2704 | 
	# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""
Chance (Random) Scheduler implementation
"""
import random
from manila import exception
from manila.scheduler import driver
from oslo.config import cfg
CONF = cfg.CONF
class ChanceScheduler(driver.Scheduler):
    """Implements Scheduler as a random node selector."""

    def _filter_hosts(self, request_spec, hosts, **kwargs):
        """Filter a list of hosts based on request_spec.

        Drops any host listed under ``filter_properties['ignore_hosts']``;
        all other hosts pass through unchanged.
        """
        filter_properties = kwargs.get('filter_properties', {})
        ignore_hosts = filter_properties.get('ignore_hosts', [])
        hosts = [host for host in hosts if host not in ignore_hosts]
        return hosts

    def _schedule(self, context, topic, request_spec, **kwargs):
        """Picks a host that is up at random.

        :raises exception.NoValidHost: if no service for ``topic`` is up,
            or if every live host was filtered out.
        """
        elevated = context.elevated()
        hosts = self.hosts_up(elevated, topic)
        if not hosts:
            msg = _("Is the appropriate service running?")
            raise exception.NoValidHost(reason=msg)
        hosts = self._filter_hosts(request_spec, hosts, **kwargs)
        if not hosts:
            msg = _("Could not find another host")
            raise exception.NoValidHost(reason=msg)
        # random.choice is the idiomatic uniform pick; equivalent to the
        # old hosts[int(random.random() * len(hosts))] index arithmetic.
        return random.choice(hosts)

    def schedule_create_share(self, context, request_spec, filter_properties):
        """Picks a host that is up at random and creates the share there."""
        topic = CONF.share_topic
        host = self._schedule(context, topic, request_spec,
                              filter_properties=filter_properties)
        share_id = request_spec['share_id']
        snapshot_id = request_spec['snapshot_id']
        # Persist the host assignment before casting to the share service.
        updated_share = driver.share_update_db(context, share_id, host)
        self.share_rpcapi.create_share(context, updated_share, host,
                                       request_spec,
                                       filter_properties,
                                       snapshot_id)
 | 
	apache-2.0 | 1,732,781,051,139,034,600 | 35.540541 | 78 | 0.644601 | false | 
| 
	wujuguang/sentry | 
	src/sentry/migrations/0098_auto__add_user__chg_field_team_owner__chg_field_activity_user__chg_fie.py | 
	36 | 
	28778 | 
	# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, connections
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the ``auth_user`` tables, unless they already exist.

        On databases where Django's own ``auth_user`` table is already
        present (introspected from the default connection), this migration
        is a no-op; otherwise it delegates to :meth:`create_auth`.
        """
        if 'auth_user' in connections['default'].introspection.table_names():
            return
        self.create_auth(orm)
    def create_auth(self, orm):
        """Create the ``auth_user`` table plus its two M2M join tables.

        Mirrors the schema django.contrib.auth would have created:
        ``auth_user`` itself, ``auth_user_groups`` and
        ``auth_user_user_permissions`` (each with a uniqueness constraint
        on the pair of foreign keys).
        """
        # Adding model 'User'
        db.create_table('auth_user', (
            (u'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
            ('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        # Let South emit the post-create signal so other apps can react.
        db.send_create_signal(u'auth', ['User'])
        # Adding M2M table for field groups on 'User'
        db.create_table('auth_user_groups', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('user', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'sentry.user'], null=False)),
            ('group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'auth.group'], null=False))
        ))
        db.create_unique('auth_user_groups', ['user_id', 'group_id'])
        # Adding M2M table for field user_permissions on 'User'
        db.create_table('auth_user_user_permissions', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('user', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'sentry.user'], null=False)),
            ('permission', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'auth.permission'], null=False))
        ))
        db.create_unique('auth_user_user_permissions', ['user_id', 'permission_id'])
    def backwards(self, orm):
        """Intentionally a no-op: the auth tables are never dropped."""
        pass
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'sentry.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'sentry.accessgroup': {
            'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.User']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.Project']", 'symmetrical': 'False'}),
            'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
        },
        u'sentry.activity': {
            'Meta': {'object_name': 'Activity'},
            'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Event']", 'null': 'True'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
            'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'})
        },
        u'sentry.alert': {
            'Meta': {'object_name': 'Alert'},
            'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
            'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': u"orm['sentry.AlertRelatedGroup']", 'to': u"orm['sentry.Group']"}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
        },
        u'sentry.alertrelatedgroup': {
            'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
            'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Alert']"}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
        },
        u'sentry.event': {
            'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
        },
        u'sentry.eventmapping': {
            'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"})
        },
        u'sentry.group': {
            'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
            'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        u'sentry.groupbookmark': {
            'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Project']"}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': u"orm['sentry.User']"})
        },
        u'sentry.groupcountbyminute': {
            'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
            'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'sentry.groupmeta': {
            'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        u'sentry.grouptag': {
            'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'sentry.grouptagkey': {
            'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'sentry.lostpasswordhash': {
            'Meta': {'object_name': 'LostPasswordHash'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'unique': 'True'})
        },
        u'sentry.option': {
            'Meta': {'object_name': 'Option'},
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'value': ('picklefield.fields.PickledObjectField', [], {})
        },
        u'sentry.pendingteammember': {
            'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': u"orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
        },
        u'sentry.project': {
            'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': u"orm['sentry.User']"}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']", 'null': 'True'})
        },
        u'sentry.projectcountbyminute': {
            'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'sentry.projectkey': {
            'Meta': {'object_name': 'ProjectKey'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': u"orm['sentry.Project']"}),
            'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'}),
            'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': u"orm['sentry.User']"})
        },
        u'sentry.projectoption': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
            'value': ('picklefield.fields.PickledObjectField', [], {})
        },
        u'sentry.searchdocument': {
            'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        },
        u'sentry.searchtoken': {
            'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
            'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': u"orm['sentry.SearchDocument']"}),
            'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'sentry.tagkey': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
            'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'sentry.tagvalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'sentry.team': {
            'Meta': {'object_name': 'Team'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': u"orm['sentry.TeamMember']", 'to': u"orm['sentry.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
        },
        u'sentry.teammember': {
            'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': u"orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': u"orm['sentry.User']"})
        },
        u'sentry.useroption': {
            'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
            u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
            'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
            'value': ('picklefield.fields.PickledObjectField', [], {})
        }
    }
    complete_apps = ['sentry']
 | 
	bsd-3-clause | 3,045,616,489,184,792,600 | 82.414493 | 225 | 0.563764 | false | 
| 
	vanhonit/xmario_center | 
	softwarecenter/ui/gtk3/widgets/description.py | 
	4 | 
	47888 | 
	# Copyright (C) 2010 Matthew McGowan
#
# Authors:
#   Matthew McGowan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gdk
from gi.repository import GObject
from gi.repository import Pango
from softwarecenter.utils import normalize_package_description
from softwarecenter.ui.gtk3.drawing import color_to_hex
from softwarecenter.ui.gtk3.utils import point_in
_PS = Pango.SCALE
class _SpecialCasePreParsers(object):
    def preparse(self, k, desc):
        if k is None:
            return desc
        func_name = '_%s_preparser' % k.lower().replace('-', '_')
        if not hasattr(self, func_name):
            return desc
        f = getattr(self, func_name)
        return f(desc)
    # special case pre-parsers
    def _skype_preparser(self, desc):
        return desc.replace('. *', '.\n*')
    def _texlive_fonts_extra_preparser(self, desc):
        return desc.replace(')\n', ').\n').replace('--\n', '--\n\n')
class EventHelper(dict):
    """State shared between the press/motion/release signal handlers.

    A restricted dict that only accepts the keys listed in VALID_KEYS;
    it remembers the initiating button press and whether a drag is in
    progress.
    """
    # FIXME: workaround for broken event.copy()
    class ButtonEvent(object):
        # Plain snapshot of the fields we need from a Gdk button event.
        def __init__(self, event):
            self.x = event.x
            self.y = event.y
            self.type = event.type
            self.button = event.button
    VALID_KEYS = (
        'event',
        'layout',
        'index',
        'within-selection',
        'drag-active',
        'drag-context')
    def __init__(self):
        dict.__init__(self)
        self.new_press(None, None, None, False)
    def __setitem__(self, k, v):
        # Reject keys outside the fixed schema early; this catches typos
        # in the event handlers.  (An unreachable `return False` that
        # followed the raise was removed -- code after `raise` never runs.)
        if k not in EventHelper.VALID_KEYS:
            raise KeyError('\"%s\" is not a valid key' % k)
        return dict.__setitem__(self, k, v)
    def new_press(self, event, layout, index, within_sel):
        """Record the state of a fresh button press (or reset with None)."""
        if event is None:
            self['event'] = None
        else:
            # this should be simply event.copy() but that appears broken
            # currently(?)
            self['event'] = EventHelper.ButtonEvent(event)
        self['layout'] = layout
        self['index'] = index
        self['within-selection'] = within_sel
        self['drag-active'] = False
        self['drag-context'] = None
class PangoLayoutProxy(object):
    """ Because i couldn't figure out how to inherit from
        pygi's Pango.Layout... """
    # Thin delegation wrapper: every method forwards unchanged to the
    # underlying Pango.Layout created from the supplied Pango context.
    def __init__(self, context):
        self._layout = Pango.Layout.new(context)
    def xy_to_index(self, x, y):
        # x/y are expected in Pango units (pixels * Pango.SCALE)
        return self._layout.xy_to_index(x, y)
    def index_to_pos(self, *args):
        return self._layout.index_to_pos(*args)
    # setter proxies
    def set_attributes(self, attrs):
        return self._layout.set_attributes(attrs)
    def set_markup(self, markup):
        # -1 length: markup is NUL-terminated, let Pango measure it
        return self._layout.set_markup(markup, -1)
    def set_font_description(self, font_desc):
        return self._layout.set_font_description(font_desc)
    def set_wrap(self, wrap_mode):
        return self._layout.set_wrap(wrap_mode)
    def set_width(self, width):
        return self._layout.set_width(width)
    # getter proxies
    def get_text(self):
        return self._layout.get_text()
    def get_pixel_extents(self):
        # [1] picks the logical extents; ink extents ([0]) are discarded
        return self._layout.get_pixel_extents()[1]
    def get_cursor_pos(self, index):
        return self._layout.get_cursor_pos(index)
    def get_iter(self):
        return self._layout.get_iter()
    def get_extents(self):
        return self._layout.get_extents()
class Layout(PangoLayoutProxy):
    """One paragraph of the TextBlock widget.

    Wraps a Pango.Layout and additionally tracks its allocation inside
    the widget, its paragraph index and bullet/indent state.
    """
    def __init__(self, widget, text=""):
        PangoLayoutProxy.__init__(self, widget.get_pango_context())
        self.widget = widget        # owning TextBlock
        self.length = 0             # text length in characters
        self.indent = 0             # left indent in pixels
        self.vspacing = None        # extra vertical spacing, or None
        self.is_bullet = False      # paragraph is a bullet point
        self.index = 0              # paragraph index within widget.order
        self.allocation = Gdk.Rectangle()
        self._default_attrs = True  # True while no highlight markup applied
        self.set_markup(text)
    def __len__(self):
        return self.length
    def set_text(self, text):
        # note: text is set as *markup*, so Pango markup is interpreted
        PangoLayoutProxy.set_markup(self, text)
        self.length = len(self.get_text())
    def set_allocation(self, x, y, w, h):
        # record where this paragraph was placed within the widget
        a = self.allocation
        a.x = x
        a.y = y
        a.width = w
        a.height = h
    def get_position(self):
        return self.allocation.x, self.allocation.y
    def cursor_up(self, cursor, target_x=-1):
        """Return ((inside, index, trailing), (x, y)) one line above *cursor*.

        target_x >= 0 keeps the preferred column while moving vertically.
        Coordinates are in Pango units.
        """
        layout = self.widget.order[cursor.paragraph]
        pos = layout.index_to_pos(cursor.index)
        x, y = pos.x, pos.y
        if target_x >= 0:
            x = target_x
        y -= _PS * self.widget.line_height
        return layout.xy_to_index(x, y), (x, y)
    def cursor_down(self, cursor, target_x=-1):
        """Same as cursor_up but one line below *cursor*."""
        layout = self.widget.order[cursor.paragraph]
        pos = layout.index_to_pos(cursor.index)
        x, y = pos.x, pos.y
        if target_x >= 0:
            x = target_x
        y += _PS * self.widget.line_height
        return layout.xy_to_index(x, y), (x, y)
    def index_at(self, px, py):
        """Return (point_in_layout, char_index) for widget pixel (px, py)."""
        #wa = self.widget.get_allocation()
        x, y = self.get_position()  # layout allocation
        (_, index, k) = self.xy_to_index((px - x) * _PS, (py - y) * _PS)
        # k is the trailing count; adding it snaps to the nearer edge
        return point_in(self.allocation, px, py), index + k
    def reset_attrs(self):
        # drop any highlight spans by re-setting the plain text as markup
        #~ self.set_attributes(Pango.AttrList())
        self.set_markup(self.get_text())
        self._default_attrs = True
    def highlight(self, start, end, bg, fg):
        """Highlight the [start, end) character range with bg/fg hex colors."""
        # FIXME: AttrBackground doesnt seem to be expose by gi yet??
        #~ attrs = Pango.AttrList()
        #~ attrs.insert(Pango.AttrBackground(bg.red, bg.green, bg.blue, start,
        #~     end))
        #~ attrs.insert(Pango.AttrForeground(fg.red, fg.green, fg.blue, start,
        #~     end))
        #~ self.set_attributes(attrs)
        # XXX: workaround -- splice a <span> into the text instead of
        # using Pango attributes (see FIXME above)
        text = self.get_text()
        new_text = (text[:start] + '<span background="%s" foreground="%s">' %
            (bg, fg))
        new_text += text[start:end]
        new_text += '</span>' + text[end:]
        self.set_markup(new_text)
        self._default_attrs = False
    def highlight_all(self, bg, fg):
        """Highlight the entire paragraph with bg/fg hex colors."""
        # FIXME: AttrBackground doesnt seem to be expose by gi yet??
        #~ attrs = Pango.AttrList()
        #~ attrs.insert(Pango.AttrBackground(bg.red, bg.green, bg.blue, 0, -1))
        #~ attrs.insert(Pango.AttrForeground(fg.red, fg.green, fg.blue, 0, -1))
        #~ self.set_attributes(attrs)
        # XXX: workaround (same <span> splice as in highlight())
        text = self.get_text()
        self.set_markup('<span background="%s" foreground="%s">%s</span>' %
            (bg, fg, text))
        self._default_attrs = False
class Cursor(object):
    """A (paragraph, index) position inside the TextBlock's paragraphs."""
    WORD_TERMINATORS = (' ',)   # empty space. suggestions recommended...
    def __init__(self, parent):
        self.parent = parent    # the owning TextBlock (has .order)
        self.index = 0          # character index within the paragraph
        self.paragraph = 0      # index into parent.order
    def is_min(self, cursor):
        # True if self is at or before *cursor* in document order
        return self.get_position() <= cursor.get_position()
    def is_max(self, cursor):
        # True if self is at or after *cursor* in document order
        return self.get_position() >= cursor.get_position()
    def switch(self, cursor):
        # swap positions with *cursor*
        this_pos = self.get_position()
        other_pos = cursor.get_position()
        self.set_position(*other_pos)
        cursor.set_position(*this_pos)
    def same_line(self, cursor):
        return self.get_current_line()[0] == cursor.get_current_line()[0]
    def get_current_line(self):
        """Locate the display line containing this cursor.

        Returns ((paragraph, line_number), (line_start, line_end)) on
        success.

        NOTE(review): the fallback below returns *three* values while the
        success path returns two, and callers such as _select_end unpack
        three (`n, r, line = ...`) -- one of these looks wrong; confirm
        against upstream before relying on the return shape.
        """
        keep_going = True
        i, it = self.index, self.parent.order[self.paragraph].get_iter()
        ln = 0
        while keep_going:
            l = it.get_line()
            ls = l.start_index
            le = ls + l.length
            if i >= ls and i <= le:
                # exclude the trailing newline except on the last line
                if not it.at_last_line():
                    le -= 1
                return (self.paragraph, ln), (ls, le)
            ln += 1
            keep_going = it.next_line()
        return None, None, None
    def get_current_word(self):
        """Return (paragraph, (word_start, word_end)) around this cursor.

        Words are delimited by WORD_TERMINATORS; the last word extends to
        the end of the paragraph.
        """
        keep_going = True
        layout = self.parent.order[self.paragraph]
        text = layout.get_text()
        i, it = self.index, layout.get_iter()
        start = 0
        while keep_going:
            j = it.get_index()
            if j >= i and text[j] in self.WORD_TERMINATORS:
                return self.paragraph, (start, j)
            elif text[j] in self.WORD_TERMINATORS:
                start = j + 1
            keep_going = it.next_char()
        return self.paragraph, (start, len(layout))
    def set_position(self, paragraph, index):
        self.index = index
        self.paragraph = paragraph
    def get_position(self):
        return self.paragraph, self.index
class PrimaryCursor(Cursor):
    """The caret: the moving end of the current selection."""
    def __init__(self, parent):
        Cursor.__init__(self, parent)
    def __repr__(self):
        return 'Cursor: ' + str((self.paragraph, self.index))
    def get_rectangle(self, layout, a):
        # Return (x, y, width, height) of the caret in widget pixels;
        # the index is clamped to the end of the paragraph.
        caret_index = min(self.index, len(layout))
        pos = layout.get_cursor_pos(caret_index)[1]
        x = layout.allocation.x + pos.x / _PS
        y = layout.allocation.y + pos.y / _PS
        return x, y, 1, pos.height / _PS
    def draw(self, cr, layout, a):
        # paint the caret as a 1px-wide black bar
        cr.set_source_rgb(0, 0, 0)
        cr.rectangle(*self.get_rectangle(layout, a))
        cr.fill()
    def zero(self):
        # jump back to the very start of the document
        self.paragraph = 0
        self.index = 0
class SelectionCursor(Cursor):
    """The anchor end of a selection, paired with the primary cursor.

    A selection is "active" (truthy) while this cursor and the primary
    cursor sit at different positions.
    """
    def __init__(self, cursor):
        Cursor.__init__(self, cursor.parent)
        self.cursor = cursor        # the primary (moving) cursor
        self.target_x = None        # preferred x kept during up/down motion
        self.target_x_indent = 0
        self.restore_point = None   # position saved by Home/End toggling
    def __repr__(self):
        return 'Selection: ' + str(self.get_range())
    def __nonzero__(self):
        # py2 truth protocol: truthy iff a non-empty selection exists
        primary = self.cursor
        return (self.paragraph, self.index) != \
            (primary.paragraph, primary.index)
    @property
    def min(self):
        primary = self.cursor
        positions = [(self.paragraph, self.index),
                     (primary.paragraph, primary.index)]
        return min(positions)
    @property
    def max(self):
        primary = self.cursor
        positions = [(self.paragraph, self.index),
                     (primary.paragraph, primary.index)]
        return max(positions)
    def clear(self, key=None):
        """Collapse the selection onto the primary cursor."""
        self.index = self.cursor.index
        self.paragraph = self.cursor.paragraph
        self.restore_point = None
        # keep the remembered column while navigating vertically
        if key in (Gdk.KEY_uparrow, Gdk.KEY_downarrow):
            return
        self.target_x = None
        self.target_x_indent = 0
    def set_target_x(self, x, indent):
        self.target_x = x
        self.target_x_indent = indent
    def get_range(self):
        """Return the selection extents as a (min, max) pair."""
        return self.min, self.max
    def within_selection(self, pos):
        """True if *pos* lies strictly between the selection extents."""
        bounds = list(self.get_range())
        bounds.append(pos)
        bounds.sort()
        # after sorting, pos occupies the middle slot iff it falls
        # between the two extents
        if pos in bounds:
            return bounds.index(pos) == 1
        return False
class TextBlock(Gtk.EventBox):
    PAINT_PRIMARY_CURSOR = False
    DEBUG_PAINT_BBOXES = False
    BULLET_POINT = u' \u2022  '
    def __init__(self):
        """Build the selectable multi-paragraph text widget.

        Sets up event masks, the primary/selection cursors, the
        copy/select-all context menu and wires all input signal handlers.
        """
        Gtk.EventBox.__init__(self)
        # draw on the parent's window; this widget has no window of its own
        self.set_visible_window(False)
        self.set_size_request(200, -1)
        self.set_can_focus(True)
        self.set_events(Gdk.EventMask.KEY_PRESS_MASK |
                        Gdk.EventMask.ENTER_NOTIFY_MASK |
                        Gdk.EventMask.LEAVE_NOTIFY_MASK |
                        Gdk.EventMask.BUTTON_RELEASE_MASK |
                        Gdk.EventMask.POINTER_MOTION_MASK)
        self._is_new = False
        # one Layout per paragraph, in display order
        self.order = []
        self.cursor = cur = PrimaryCursor(self)
        self.selection = sel = SelectionCursor(self.cursor)
        # created lazily on first copy (see _copy_text)
        self.clipboard = None
        #~ event_helper = EventHelper()
        self._update_cached_layouts()
        # scratch layout used for height-for-width measurement only
        self._test_layout = self.create_pango_layout('')
        #self._xterm = Gdk.Cursor.new(Gdk.XTERM)
        # popup menu and menuitem's
        self.copy_menuitem = Gtk.ImageMenuItem.new_from_stock(
                                            Gtk.STOCK_COPY, None)
        self.select_all_menuitem = Gtk.ImageMenuItem.new_from_stock(
                                            Gtk.STOCK_SELECT_ALL, None)
        self.menu = Gtk.Menu()
        self.menu.attach_to_widget(self, None)
        self.menu.append(self.copy_menuitem)
        self.menu.append(self.select_all_menuitem)
        self.menu.show_all()
        self.copy_menuitem.connect('select', self._menu_do_copy, sel)
        self.select_all_menuitem.connect('select', self._menu_do_select_all,
            cur, sel)
        #~ Gtk.drag_source_set(self, Gdk.ModifierType.BUTTON1_MASK,
                            #~ None, Gdk.DragAction.COPY)
        #~ Gtk.drag_source_add_text_targets(self)
        #~ self.connect('drag-begin', self._on_drag_begin)
        #~ self.connect('drag-data-get', self._on_drag_data_get, sel)
        # shared mutable state between press/motion/release handlers
        event_helper = EventHelper()
        self.connect('button-press-event', self._on_press, event_helper, cur,
            sel)
        self.connect('button-release-event', self._on_release, event_helper,
            cur, sel)
        self.connect('motion-notify-event', self._on_motion, event_helper,
            cur, sel)
        self.connect('key-press-event', self._on_key_press, cur, sel)
        self.connect('key-release-event', self._on_key_release, cur, sel)
        self.connect('focus-in-event', self._on_focus_in)
        self.connect('focus-out-event', self._on_focus_out)
        self.connect("size-allocate", self.on_size_allocate)
        self.connect('style-updated', self._on_style_updated)
    def on_size_allocate(self, *args):
        """Re-wrap and position every paragraph for the new widget width."""
        allocation = self.get_allocation()
        width = allocation.width
        x = y = 0
        for layout in self.order:
            # wrap width excludes the paragraph's own indent
            layout.set_width(_PS * (width - layout.indent))
            # inter-paragraph spacing (skipped before the first paragraph)
            if layout.index > 0:
                y += (layout.vspacing or self.line_height)
            e = layout.get_pixel_extents()
            if self.get_direction() != Gtk.TextDirection.RTL:
                layout.set_allocation(e.x + layout.indent, y + e.y,
                                      width - layout.indent, e.height)
            else:
                # RTL: mirror the paragraph against the right edge
                layout.set_allocation(x + width - e.x - e.width -
                    layout.indent - 1, y + e.y, width - layout.indent,
                    e.height)
            y += e.y + e.height
    # overrides
    def do_get_request_mode(self):
        # height depends on how the text wraps at the given width
        return Gtk.SizeRequestMode.HEIGHT_FOR_WIDTH
    def do_get_preferred_height_for_width(self, width):
        """Measure total wrapped height of all paragraphs at *width*."""
        height = 0
        # reuse one scratch layout instead of touching the real ones
        layout = self._test_layout
        for l in self.order:
            layout.set_text(l.get_text(), -1)
            layout.set_width(_PS * (width - l.indent))
            lh = layout.get_pixel_extents()[1].height
            height += lh + (l.vspacing or self.line_height)
        # never request less than 50px
        height = max(50, height)
        return height, height
    def do_draw(self, cr):
        # render() is defined elsewhere in this class (outside this chunk)
        self.render(self, cr)
    def _config_colors(self):
        """Refresh the cached selection bg/fg colors from the style context.

        Stores them as hex strings (used by Layout.highlight markup).
        """
        context = self.get_style_context()
        context.save()
        context.add_class(Gtk.STYLE_CLASS_HIGHLIGHT)
        state = self.get_state_flags()
        if self.has_focus():
            state |= Gtk.StateFlags.FOCUSED
        context.set_state(state)
        self._bg = color_to_hex(context.get_background_color(state))
        self._fg = color_to_hex(context.get_color(state))
        context.restore()
    def _on_style_updated(self, widget):
        # theme changed: recompute colors and cached layout metrics
        self._config_colors()
        self._update_cached_layouts()
#    def _on_drag_begin(self, widgets, context, event_helper):
#        print 'drag: begin'
    def _on_drag_data_get(self, widget, context, selection, info, timestamp,
        sel):
        # supply the selected text to a DnD target
        # (handler is currently not connected -- see commented-out
        # drag-source setup in __init__)
#        print 'drag: get data'
        text = self.get_selected_text(sel)
        selection.set_text(text, -1)
    def _on_focus_in(self, widget, event):
        # focus changes the highlight palette
        self._config_colors()
    def _on_focus_out(self, widget, event):
        self._config_colors()
    def _on_motion(self, widget, event, event_helper, cur, sel):
        """Extend the selection while button 1 is held and dragged."""
        # only exactly button-1-with-no-modifiers counts as select-drag
        if not (event.state == Gdk.ModifierType.BUTTON1_MASK):
            # or not self.has_focus():
            return
        # check if we have moved enough to count as a drag
        press = event_helper['event']
        # mvo: how can this be?
        if not press:
            return
        start_x, start_y = int(press.x), int(press.y)
        cur_x, cur_y = int(event.x), int(event.y)
        if (not event_helper['drag-active'] and
            self.drag_check_threshold(start_x, start_y, cur_x, cur_y)):
            event_helper['drag-active'] = True
        if not event_helper['drag-active']:
            return
        #~ if (event_helper['within-selection'] and
            #~ not event_helper['drag-context']):
            #~ target_list = Gtk.TargetList()
            #~ target_list.add_text_targets(80)
            #~ ctx = self.drag_begin(target_list,           # target list
                                  #~ Gdk.DragAction.COPY,   # action
                                  #~ 1,                     # initiating button
                                  #~ event)                 # event
#~
            #~ event_helper['drag-context'] = ctx
            #~ return
        # move the primary cursor to the pointer; the selection anchor
        # stays put, which grows/shrinks the selection
        # NOTE(review): the loop variable `point_in` shadows the module-level
        # point_in() helper imported at the top of the file
        for layout in self.order:
            point_in, index = layout.index_at(cur_x, cur_y)
            if point_in:
                cur.set_position(layout.index, index)
                self.queue_draw()
                break
    def _on_press(self, widget, event, event_helper, cur, sel):
        """Place the cursor / open the context menu on button press."""
        # NOTE(review): when a selection exists and we are unfocused, the
        # press only grabs focus and returns, deliberately preserving the
        # selection ("spot the difference" vs the plain grab below)
        if sel and not self.has_focus():
            self.grab_focus()
            return  # spot the difference
        if not self.has_focus():
            self.grab_focus()
        if event.button == 3:
            # right click: context menu, no cursor change
            self._button3_action(cur, sel, event)
            return
        elif event.button != 1:
            return
        for layout in self.order:
            x, y = int(event.x), int(event.y)
            point_in, index = layout.index_at(x, y)
            if point_in:
                within_sel = False
                #~ within_sel = sel.within_selection((layout.index, index))
                if not within_sel:
                    cur.set_position(layout.index, index)
                    sel.clear()
                #~ event_helper.new_press(event.copy(), layout, index,
                #~     within_sel)
                # remember the press so motion/release can detect drags
                event_helper.new_press(event, layout, index, within_sel)
                break
    def _on_release(self, widget, event, event_helper, cur, sel):
        """Finish a click: collapse the selection, handle double/triple click."""
        if not event_helper['event']:
            return
        # check if a drag occurred
        if event_helper['drag-active']:
            # if so, do not handle release
            return
        # else, handle release, do click
        cur.set_position(event_helper['layout'].index,
                         event_helper['index'])
        sel.clear()
        # double/triple click type comes from the recorded *press* event
        press = event_helper['event']
        if (press.type == Gdk.EventType._2BUTTON_PRESS):
            self._2click_select(cur, sel)
        elif (press.type == Gdk.EventType._3BUTTON_PRESS):
            self._3click_select(cur, sel)
        self.queue_draw()
    def _menu_do_copy(self, item, sel):
        # context-menu "Copy" -> clipboard
        self._copy_text(sel)
    def _menu_do_select_all(self, item, cur, sel):
        # context-menu "Select All"
        self._select_all(cur, sel)
    def _button3_action(self, cur, sel, event):
        """Pop up the context menu with item sensitivity matching state."""
        start, end = sel.get_range()
        self.copy_menuitem.set_sensitive(True)
        self.select_all_menuitem.set_sensitive(True)
        if not sel:
            # nothing selected -> nothing to copy
            self.copy_menuitem.set_sensitive(False)
        elif start == (0, 0) and \
            end == (len(self.order) - 1, len(self.order[-1])):
            # everything already selected -> "Select All" is a no-op
            self.select_all_menuitem.set_sensitive(False)
        self.menu.popup(None,  # parent_menu_shell,
                        None,  # parent_menu_item,
                        None,  # GtkMenuPositionFunc func,
                        None,  # data,
                        event.button,
                        event.time)
    def _on_key_press(self, widget, event, cur, sel):
        """Keyboard navigation: arrows, Home/End, with Ctrl/Shift modifiers.

        Returns True when the key was handled (stops further propagation).
        """
        kv = event.keyval
        s, i = cur.paragraph, cur.index
        handled_keys = True
        ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK) > 0
        shift = (event.state & Gdk.ModifierType.SHIFT_MASK) > 0
        # NOTE(review): KEY_uparrow/KEY_downarrow are obscure keysyms --
        # arrow keys normally produce KEY_Up/KEY_Down (as matched below),
        # so this early-out may never trigger; confirm intended keysyms.
        if not self.PAINT_PRIMARY_CURSOR and \
            kv in (Gdk.KEY_uparrow, Gdk.KEY_downarrow) and not sel:
            return False
        if kv == Gdk.KEY_Tab:
            # let Tab keep doing focus navigation
            handled_keys = False
        elif kv == Gdk.KEY_Left:
            if ctrl:
                self._select_left_word(cur, sel, s, i)
            else:
                self._select_left(cur, sel, s, i, shift)
            if shift:
                # remember the column for subsequent vertical motion
                layout = self._get_cursor_layout()
                pos = layout.index_to_pos(cur.index)
                sel.set_target_x(pos.x, layout.indent)
        elif kv == Gdk.KEY_Right:
            if ctrl:
                self._select_right_word(cur, sel, s, i)
            else:
                self._select_right(cur, sel, s, i, shift)
            if shift:
                layout = self._get_cursor_layout()
                pos = layout.index_to_pos(cur.index)
                sel.set_target_x(pos.x, layout.indent)
        elif kv == Gdk.KEY_Up:
            if ctrl:
                # Ctrl+Up: jump to start of paragraph (or previous one)
                if i == 0:
                    if s > 0:
                        cur.paragraph -= 1
                cur.set_position(cur.paragraph, 0)
            elif sel and not shift:
                # plain Up with a selection collapses to its start
                cur.set_position(*sel.min)
            else:
                self._select_up(cur, sel)
        elif kv == Gdk.KEY_Down:
            if ctrl:
                # Ctrl+Down: jump to end of paragraph (or next one)
                if i == len(self._get_layout(cur)):
                    if s + 1 < len(self.order):
                        cur.paragraph += 1
                i = len(self._get_layout(cur))
                cur.set_position(cur.paragraph, i)
            elif sel and not shift:
                # plain Down with a selection collapses to its end
                cur.set_position(*sel.max)
            else:
                self._select_down(cur, sel)
        elif kv == Gdk.KEY_Home:
            if shift:
                self._select_home(cur, sel, self.order[cur.paragraph])
            else:
                cur.set_position(0, 0)
        elif kv == Gdk.KEY_End:
            if shift:
                self._select_end(cur, sel, self.order[cur.paragraph])
            else:
                cur.paragraph = len(self.order) - 1
                cur.index = len(self._get_layout(cur))
        else:
            handled_keys = False
        # unshifted handled navigation collapses any selection
        if not shift and handled_keys:
            sel.clear(kv)
        self.queue_draw()
        return handled_keys
    def _on_key_release(self, widget, event, cur, sel):
        # Ctrl+A / Ctrl+C shortcuts (on release, not press)
        ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK) > 0
        if ctrl:
            if event.keyval == Gdk.KEY_a:
                self._select_all(cur, sel)
            elif event.keyval == Gdk.KEY_c:
                self._copy_text(sel)
            self.queue_draw()
    def _select_up(self, cur, sel):
        """Move the primary cursor one display line up.

        Crosses into the previous paragraph when already on the first
        line; returns False only at the absolute top of the document.
        """
        #~ if sel and not cur.is_min(sel) and cur.same_line(sel):
            #~ cur.switch(sel)
        s = cur.paragraph
        layout = self._get_layout(cur)
        # NOTE(review): truthiness test -- a remembered target_x of 0
        # (column zero) is treated like "unset"; confirm that is intended
        if sel.target_x:
            x = sel.target_x
            if sel.target_x_indent:
                # compensate for differing paragraph indents (Pango units)
                x += (sel.target_x_indent - layout.indent) * _PS
            (_, j, k), (x, y) = layout.cursor_up(cur, x)
            j += k
        else:
            (_, j, k), (x, y) = layout.cursor_up(cur)
            j += k
            sel.set_target_x(x, layout.indent)
        if (s, j) != cur.get_position():
            # moved within the same paragraph
            cur.set_position(s, j)
        elif s > 0:
            # wrap into the last line of the previous paragraph
            cur.paragraph = s - 1
            layout = self._get_layout(cur)
            if sel.target_x_indent:
                x += (sel.target_x_indent - layout.indent) * _PS
            y = layout.get_extents()[0].height
            (_, j, k) = layout.xy_to_index(x, y)
            cur.set_position(s - 1, j + k)
        else:
            # already at the top of the document
            return False
        return True
    def _select_down(self, cur, sel):
        """Move the primary cursor one display line down.

        Mirror of _select_up: crosses into the next paragraph from the
        last line; returns False only at the absolute document end.
        """
        #~ if sel and not cur.is_max(sel) and cur.same_line(sel):
            #~ cur.switch(sel)
        s = cur.paragraph
        layout = self._get_layout(cur)
        # see NOTE in _select_up about target_x == 0 being treated as unset
        if sel.target_x:
            x = sel.target_x
            if sel.target_x_indent:
                x += (sel.target_x_indent - layout.indent) * _PS
            (_, j, k), (x, y) = layout.cursor_down(cur, x)
            j += k
        else:
            (_, j, k), (x, y) = layout.cursor_down(cur)
            j += k
            sel.set_target_x(x, layout.indent)
        if (s, j) != cur.get_position():
            # moved within the same paragraph
            cur.set_position(s, j)
        elif s < len(self.order) - 1:
            # wrap into the first line of the next paragraph
            cur.paragraph = s + 1
            layout = self._get_layout(cur)
            if sel.target_x_indent:
                x += (sel.target_x_indent - layout.indent) * _PS
            y = 0
            (_, j, k) = layout.xy_to_index(x, y)
            cur.set_position(s + 1, j + k)
        else:
            # already at the bottom of the document
            return False
        return True
    def _2click_select(self, cursor, sel):
        # double click selects the word under the cursor
        self._select_word(cursor, sel)
    def _3click_select(self, cursor, sel):
        # triple click: select-all instead of select-line, see XXX below
        # XXX:
        # _select_line seems to expose the following Pango issue:
        # (description.py:3892): Pango-CRITICAL **:
        # pango_layout_line_unref: assertion `private->ref_count > 0'
        # failed
        # ... which can result in a segfault
        #~ self._select_line(cursor, sel)
        self._select_all(cursor, sel)
    def _copy_text(self, sel):
        """Put the selected text on the CLIPBOARD (lazily created)."""
        text = self.get_selected_text(sel)
        if not self.clipboard:
            display = Gdk.Display.get_default()
            selection = Gdk.Atom.intern("CLIPBOARD", False)
            self.clipboard = Gtk.Clipboard.get_for_display(display, selection)
        self.clipboard.clear()
        # stripped: surrounding whitespace is not part of the user's intent
        self.clipboard.set_text(text.strip(), -1)
    def _select_end(self, cur, sel, layout):
        """Shift+End: step through line end -> paragraph end -> document end.

        Repeated presses cycle outward; restore_point remembers the
        original spot so a final press can return to the line end.

        NOTE(review): this unpacks three values from get_current_line(),
        whose success path returns only two -- see note on
        Cursor.get_current_line; confirm against upstream.
        """
        if not cur.is_max(sel):
            cur.switch(sel)
        n, r, line = cur.get_current_line()
        cur_pos = cur.get_position()
        if cur_pos == (len(self.order) - 1, len(self.order[-1])):  # abs end
            if sel.restore_point:
                # reinstate restore point
                cur.set_position(*sel.restore_point)
            else:
                # reselect the line end
                n, r, line = sel.get_current_line()
                cur.set_position(n[0], r[1])
        elif cur_pos[1] == len(self.order[n[0]]):   # para end
            # select abs end
            cur.set_position(len(self.order) - 1, len(self.order[-1]))
        elif cur_pos == (n[0], r[1]):   # line end
            # select para end
            cur.set_position(n[0], len(self.order[n[0]]))
        else:   # not at any end, within line somewhere
            # select line end
            if sel:
                sel.restore_point = cur_pos
            cur.set_position(n[0], r[1])
    def _select_home(self, cur, sel, layout):
        """Shift+Home counterpart of _select_end: extend the selection to
        the line start, then the paragraph start, then the absolute start,
        cycling back through sel.restore_point."""
        # Ensure *cur* is the lesser (leftmost) end of the selection
        if not cur.is_min(sel):
            cur.switch(sel)
        n, r, line = cur.get_current_line()
        cur_pos = cur.get_position()
        if cur_pos == (0, 0):   # absolute home
            if sel.restore_point:
                # reinstate restore point
                cur.set_position(*sel.restore_point)
            else:
                # reselect the line start
                n, r, line = sel.get_current_line()
                cur.set_position(n[0], r[0])
        elif cur_pos[1] == 0:   # para home
            # select absolute home
            cur.set_position(0, 0)
        elif cur_pos == (n[0], r[0]):      # line home
            # select para home
            cur.set_position(n[0], 0)
        else:                   # not at any home, within line somewhere
            # select line home
            if sel:
                sel.restore_point = cur_pos
            cur.set_position(n[0], r[0])
    def _select_left(self, cur, sel, s, i, shift):
        """Move/extend one character to the left.

        s, i: current (paragraph, index) position.  Without shift, a left
        arrow on a non-collapsed selection just collapses it to its
        minimum end.
        """
        if not shift and not cur.is_min(sel):
            cur.switch(sel)
            return
        if i > 0:
            cur.set_position(s, i - 1)
        elif cur.paragraph > 0:
            # wrap to the end of the previous paragraph
            cur.paragraph -= 1
            cur.set_position(s - 1, len(self._get_layout(cur)))
    def _select_right(self, cur, sel, s, i, shift):
        """Move/extend one character to the right.

        s, i: current (paragraph, index) position.  Without shift, a right
        arrow on a non-collapsed selection collapses it to its maximum end.
        """
        if not shift and not cur.is_max(sel):
            cur.switch(sel)
            return
        if i < len(self._get_layout(cur)):
            cur.set_position(s, i + 1)
        elif s < len(self.order) - 1:
            # wrap to the start of the next paragraph
            cur.set_position(s + 1, 0)
    def _select_left_word(self, cur, sel, s, i):
        """Move the cursor one word to the left (Ctrl+Left)."""
        if i > 0:
            cur.index -= 1
        elif s > 0:
            # hop to the end of the previous paragraph first
            cur.paragraph -= 1
            cur.index = len(self._get_layout(cur))
        paragraph, word = cur.get_current_word()
        if not word:
            return
        # land just before the start of the current word
        cur.set_position(paragraph, max(0, word[0] - 1))
    def _select_right_word(self, cur, sel, s, i):
        """Move the cursor one word to the right (Ctrl+Right)."""
        # NOTE(review): ll is taken *before* any paragraph hop below, so the
        # final min() clamp uses the previous paragraph's length when the
        # cursor wraps -- confirm this is intended.
        ll = len(self._get_layout(cur))
        if i < ll:
            cur.index += 1
        elif s + 1 < len(self.order):
            # hop to the start of the next paragraph first
            cur.paragraph += 1
            cur.index = 0
        paragraph, word = cur.get_current_word()
        if not word:
            return
        # land just past the end of the current word
        cur.set_position(paragraph, min(word[1] + 1, ll))
    def _select_word(self, cursor, sel):
        """Select the word under *cursor*; for RTL locales the cursor and
        selection anchor are swapped so the cursor leads."""
        paragraph, word = cursor.get_current_word()
        if word:
            cursor.set_position(paragraph, word[1] + 1)
            sel.set_position(paragraph, word[0])
            if self.get_direction() == Gtk.TextDirection.RTL:
                cursor.switch(sel)
    def _select_line(self, cursor, sel):
        """Extend the selection to cover the cursor's current layout line.

        Mirrors _select_word: anchors the selection at the line start and
        moves the cursor to the line end (swapped for RTL locales).
        Currently unused by _3click_select because of a Pango crash (see
        the XXX note there).
        """
        # BUG FIX: get_current_line() returns a 3-tuple (n, r, line) -- see
        # _select_end/_select_home -- so the old two-name unpack raised
        # ValueError.  Also operate on the *cursor* argument instead of
        # reaching for self.cursor, matching the other _select_* handlers.
        n, r, line = cursor.get_current_line()
        sel.set_position(n[0], r[0])
        cursor.set_position(n[0], r[1])
        if self.get_direction() == Gtk.TextDirection.RTL:
            cursor.switch(sel)
    def _select_all(self, cursor, sel):
        """Select every paragraph: anchor at the absolute start, cursor at
        the absolute end (swapped for RTL so the cursor leads)."""
        last = self.order[-1]
        sel.set_position(0, 0)
        cursor.set_position(last.index, len(last))
        if self.get_direction() == Gtk.TextDirection.RTL:
            cursor.switch(sel)
    def _selection_copy(self, layout, sel, new_para=True):
        """Return the portion of *layout*'s text covered by *sel*.

        new_para: prefix the result with a paragraph separator so that
        multi-paragraph copies keep their blank lines.  Returns '' when
        the layout lies outside the selected paragraph range.
        """
        i = layout.index
        start, end = sel.get_range()
        # outside the selection (or no selection at all): nothing to copy
        if not sel or i < start[0] or i > end[0]:
            return ''
        prefix = '\n\n' if new_para else ''
        if i == start[0]:
            # first selected paragraph: copy from the selection start
            if end[0] > i:
                return prefix + layout.get_text()[start[1]: len(layout)]
            return prefix + layout.get_text()[start[1]: end[1]]
        if i == end[0]:
            # last selected paragraph: copy up to the selection end
            if start[0] < i:
                return prefix + layout.get_text()[0: end[1]]
            return prefix + layout.get_text()[start[1]: end[1]]
        # fully inside the selection: copy the whole paragraph
        return prefix + layout.get_text()
    def _new_layout(self, text=''):
        """Create a new word/char-wrapping Layout owned by this widget."""
        layout = Layout(self, text)
        layout.set_wrap(Pango.WrapMode.WORD_CHAR)
        return layout
    def _update_cached_layouts(self):
        """Rebuild the cached bold bullet-point layout and derive the
        bullet indent width and line height from its pixel extents."""
        self._bullet = self._new_layout()
        self._bullet.set_markup(self.BULLET_POINT)
        font_desc = Pango.FontDescription()
        font_desc.set_weight(Pango.Weight.BOLD)
        self._bullet.set_font_description(font_desc)
        e = self._bullet.get_pixel_extents()
        self.indent, self.line_height = e.width, e.height
    def _selection_highlight(self, layout, sel, bg, fg):
        """Apply (or clear) the selection highlight on *layout*.

        Mirrors the slicing logic of _selection_copy: partial highlight on
        the first/last selected paragraph, full highlight in between.
        """
        i = layout.index
        start, end = sel.get_range()
        if sel and i >= start[0] and i <= end[0]:
            if i == start[0]:
                if end[0] > i:
                    layout.highlight(start[1], len(layout), bg, fg)
                else:
                    layout.highlight(start[1], end[1], bg, fg)
            elif i == end[0]:
                if start[0] < i:
                    layout.highlight(0, end[1], bg, fg)
                else:
                    layout.highlight(start[1], end[1], bg, fg)
            else:
                layout.highlight_all(bg, fg)
        elif not layout._default_attrs:
            # not selected: drop any stale highlight attributes
            layout.reset_attrs()
    def _paint_bullet_point(self, cr, x, y):
        """Render the cached bullet glyph at (x, y) on cairo context *cr*."""
        # draw the layout
        Gtk.render_layout(self.get_style_context(),
                            cr,             # cairo context
                            x,           # x coord
                            y,           # y coord
                            self._bullet._layout)   # a Pango.Layout()
    def _get_layout(self, cursor):
        """Return the paragraph Layout that *cursor* currently sits in."""
        return self.order[cursor.paragraph]
    def _get_cursor_layout(self):
        """Return the Layout containing the primary cursor."""
        return self.order[self.cursor.paragraph]
    def _get_selection_layout(self):
        """Return the Layout containing the selection anchor."""
        return self.order[self.selection.paragraph]
    def render(self, widget, cr):
        """Draw handler: paint every paragraph layout with its selection
        highlight, bullet glyphs, optional debug boxes and (when focused)
        the primary cursor."""
        if not self.order:
            return
        a = self.get_allocation()
        for layout in self.order:
            lx, ly = layout.get_position()
            # refresh this paragraph's highlight attributes before drawing
            self._selection_highlight(layout,
                                      self.selection,
                                      self._bg, self._fg)
            if layout.is_bullet:
                # bullet glyph sits in the indent gutter; mirrored for RTL
                if self.get_direction() != Gtk.TextDirection.RTL:
                    indent = layout.indent - self.indent
                else:
                    indent = a.width - layout.indent
                self._paint_bullet_point(cr, indent, ly)
            if self.DEBUG_PAINT_BBOXES:
                la = layout.allocation
                cr.rectangle(la.x, la.y, la.width, la.height)
                cr.set_source_rgb(1, 0, 0)
                cr.stroke()
            # draw the layout
            Gtk.render_layout(self.get_style_context(),
                                cr,
                                lx,             # x coord
                                ly,             # y coord
                                layout._layout)           # a Pango.Layout()
        # draw the cursor
        if self.PAINT_PRIMARY_CURSOR and self.has_focus():
            self.cursor.draw(cr, self._get_layout(self.cursor), a)
    def append_paragraph(self, p, vspacing=None):
        """Append *p* as a plain paragraph layout, with optional vertical
        spacing above it."""
        layout = self._new_layout()
        layout.index = len(self.order)
        layout.vspacing = vspacing
        layout.set_text(p)
        self.order.append(layout)
    def append_bullet(self, point, indent_level, vspacing=None):
        """Append *point* as a bullet layout, indented one bullet-width
        per nesting level, with optional vertical spacing above it."""
        layout = self._new_layout()
        layout.index = len(self.order)
        layout.indent = self.indent * (indent_level + 1)
        layout.vspacing = vspacing
        layout.is_bullet = True
        layout.set_text(point)
        self.order.append(layout)
    def copy_clipboard(self):
        """Copy the current selection to the system clipboard."""
        self._copy_text(self.selection)
    def get_selected_text(self, sel=None):
        """Return the currently selected text as a single string.

        sel: selection object to read from; defaults to self.selection.
        Paragraphs after the first are separated by a blank line (handled
        by _selection_copy via its new_para flag).
        """
        if not sel:
            sel = self.selection
        # join() avoids the quadratic cost of repeated string concatenation
        return ''.join(
            self._selection_copy(layout, sel, layout.index > 0)
            for layout in self.order)
    def select_all(self):
        """Select the entire text and schedule a redraw."""
        self._select_all(self.cursor, self.selection)
        self.queue_draw()
    def finished(self):
        """Called once content is fully appended; triggers a re-layout."""
        self.queue_resize()
    def clear(self, key=None):
        """Reset cursor and selection and drop all paragraph layouts."""
        self.cursor.zero()
        self.selection.clear(key)
        self.order = []
class AppDescription(Gtk.VBox):
    """Renders a package description as word-wrapped paragraphs and
    bullet points inside a TextBlock."""
    TYPE_PARAGRAPH = 0
    TYPE_BULLET = 1
    # shared pre-parser for package-specific description fixups
    _preparser = _SpecialCasePreParsers()
    def __init__(self):
        Gtk.VBox.__init__(self)
        self.description = TextBlock()
        self.pack_start(self.description, False, False, 0)
        # tracks whether the last appended block was a paragraph or bullet,
        # so consecutive bullets can be spaced more tightly
        self._prev_type = None
    def _part_is_bullet(self, part):
        """Return (is_bullet, indent_index) for one description line."""
        # normalize_description() ensures that we only have "* " bullets
        i = part.find("* ")
        return i > -1, i
    def _parse_desc(self, desc, pkgname):
        """ Attempt to maintain original fixed width layout, while
            reconstructing the description into text blocks
            (either paragraphs or bullets) which are line-wrap friendly.
        """
        # pre-parse description if a special case exists for the given pkgname
        desc = self._preparser.preparse(pkgname, desc)
        parts = normalize_package_description(desc).split('\n')
        for part in parts:
            if not part:
                continue
            is_bullet, indent = self._part_is_bullet(part)
            if is_bullet:
                self.append_bullet(part, indent)
            else:
                self.append_paragraph(part)
        self.description.finished()
    def clear(self):
        """Remove all current description content."""
        self.description.clear()
    def append_paragraph(self, p):
        """Append a paragraph block with full line-height spacing above."""
        vspacing = self.description.line_height
        self.description.append_paragraph(p.strip(), vspacing)
        self._prev_type = self.TYPE_PARAGRAPH
    def append_bullet(self, point, indent_level):
        """Append a bullet block; consecutive bullets get tighter spacing."""
        if self._prev_type == self.TYPE_BULLET:
            vspacing = int(0.4 * self.description.line_height)
        else:
            vspacing = self.description.line_height
        # strip the leading indent plus "* " marker from the bullet text
        self.description.append_bullet(
                        point[indent_level + 2:], indent_level, vspacing)
        self._prev_type = self.TYPE_BULLET
    def set_description(self, raw_desc, pkgname):
        """Clear current content and render *raw_desc* for *pkgname*."""
        self.clear()
        # Python 2 str/unicode normalisation so everything downstream is a
        # utf-8 encoded byte string; isinstance() instead of type() == str
        if isinstance(raw_desc, str):
            encoded_desc = unicode(raw_desc, 'utf8').encode('utf8')
        else:
            encoded_desc = raw_desc.encode('utf8')
        self._text = GObject.markup_escape_text(encoded_desc)
        self._parse_desc(self._text, pkgname)
        self.show_all()
    # easy access to some TextBlock methods
    def copy_clipboard(self):
        return TextBlock.copy_clipboard(self.description)
    def get_selected_text(self):
        return TextBlock.get_selected_text(self.description)
    def select_all(self):
        return TextBlock.select_all(self.description)
def get_test_description_window():
    """Build a demo Gtk window that cycles through sample package
    descriptions -- a manual test harness for AppDescription."""
    EXAMPLE0 = """p7zip is the Unix port of 7-Zip, a file archiver that \
archives with very high compression ratios.
p7zip-full provides:
 - /usr/bin/7za a standalone version of the 7-zip tool that handles
   7z archives (implementation of the LZMA compression algorithm) and some \
other formats.
 - /usr/bin/7z not only does it handle 7z but also ZIP, Zip64, CAB, RAR, \
ARJ, GZIP,
   BZIP2, TAR, CPIO, RPM, ISO and DEB archives. 7z compression is 30-50% \
better than ZIP compression.
p7zip provides 7zr, a light version of 7za, and p7zip a gzip like wrapper \
around 7zr."""
    EXAMPLE1 = """Transmageddon supports almost any format as its input and \
can generate a very large host of output files. The goal of the application \
was to help people to create the files they need to be able to play on their \
mobile devices and for people not hugely experienced with multimedia to \
generate a multimedia file without having to resort to command line tools \
with ungainly syntaxes.
The currently supported codecs are:
 * Containers:
  - Ogg
  - Matroska
  - AVI
  - MPEG TS
  - flv
  - QuickTime
  - MPEG4
  - 3GPP
  - MXT
 * Audio encoders:
  - Vorbis
  - FLAC
  - MP3
  - AAC
  - AC3
  - Speex
  - Celt
 * Video encoders:
  - Theora
  - Dirac
  - H264
  - MPEG2
  - MPEG4/DivX5
  - xvid
  - DNxHD
It also provide the support for the GStreamer's plugins auto-search."""
    EXAMPLE2 = """File-roller is an archive manager for the GNOME \
environment. It allows you to:
 * Create and modify archives.
 * View the content of an archive.
 * View a file contained in an archive.
 * Extract files from the archive.
File-roller supports the following formats:
 * Tar (.tar) archives, including those compressed with
   gzip (.tar.gz, .tgz), bzip (.tar.bz, .tbz), bzip2 (.tar.bz2, .tbz2),
   compress (.tar.Z, .taz), lzip (.tar.lz, .tlz), lzop (.tar.lzo, .tzo),
   lzma (.tar.lzma) and xz (.tar.xz)
 * Zip archives (.zip)
 * Jar archives (.jar, .ear, .war)
 * 7z archives (.7z)
 * iso9660 CD images (.iso)
 * Lha archives (.lzh)
 * Single files compressed with gzip (.gz), bzip (.bz), bzip2 (.bz2),
   compress (.Z), lzip (.lz), lzop (.lzo), lzma (.lzma) and xz (.xz)
File-roller doesn't perform archive operations by itself, but relies on \
standard tools for this."""
    EXAMPLE3 = """This package includes the following CTAN packages:
 Asana-Math -- A font to typeset maths in Xe(La)TeX.
 albertus --
 allrunes -- Fonts and LaTeX package for almost all runes.
 antiqua -- the URW Antiqua Condensed Font.
 antp -- Antykwa Poltawskiego: a Type 1 family of Polish traditional type.
 antt -- Antykwa Torunska: a Type 1 family of a Polish traditional type.
 apl -- Fonts for typesetting APL programs.
 ar -- Capital A and capital R ligature for Apsect Ratio.
 archaic -- A collection of archaic fonts.
 arev -- Fonts and LaTeX support files for Arev Sans.
 ascii -- Support for IBM "standard ASCII" font.
 astro -- Astronomical (planetary) symbols.
 atqolive --
 augie -- Calligraphic font for typesetting handwriting.
 auncial-new -- Artificial Uncial font and LaTeX support macros.
 aurical -- Calligraphic fonts for use with LaTeX in T1 encoding.
 barcodes -- Fonts for making barcodes.
 bayer -- Herbert Bayers Universal Font For Metafont.
 bbding -- A symbol (dingbat) font and LaTeX macros for its use.
 bbm -- "Blackboard-style" cm fonts.
 bbm-macros -- LaTeX support for "blackboard-style" cm fonts.
 bbold -- Sans serif blackboard bold.
 belleek -- Free replacement for basic MathTime fonts.
 bera -- Bera fonts.
 blacklettert1 -- T1-encoded versions of Haralambous old German fonts.
 boisik -- A font inspired by Baskerville design.
 bookhands -- A collection of book-hand fonts.
 braille -- Support for braille.
 brushscr -- A handwriting script font.
 calligra -- Calligraphic font.
 carolmin-ps -- Adobe Type 1 format of Carolingian Minuscule fonts.
 cherokee -- A font for the Cherokee script.
 clarendo --
 cm-lgc -- Type 1 CM-based fonts for Latin, Greek and Cyrillic.
 cmbright -- Computer Modern Bright fonts.
 cmll -- Symbols for linear logic.
 cmpica -- A Computer Modern Pica variant.
 coronet --
 courier-scaled -- Provides a scaled Courier font.
 cryst -- Font for graphical symbols used in crystallography.
 cyklop -- The Cyclop typeface.
 dancers -- Font for Conan Doyle's "The Dancing Men".
 dice -- A font for die faces.
 dictsym -- DictSym font and macro package
 dingbat -- Two dingbat symbol fonts.
 doublestroke -- Typeset mathematical double stroke symbols.
 dozenal -- Typeset documents using base twelve numbering (also called
  "dozenal")
 duerer -- Computer Duerer fonts.
 duerer-latex -- LaTeX support for the Duerer fonts.
 ean -- Macros for making EAN barcodes.
 ecc -- Sources for the European Concrete fonts.
 eco -- Oldstyle numerals using EC fonts.
 eiad -- Traditional style Irish fonts.
 eiad-ltx -- LaTeX support for the eiad font.
 elvish -- Fonts for typesetting Tolkien Elvish scripts.
 epigrafica -- A Greek and Latin font.
 epsdice -- A scalable dice "font".
 esvect -- Vector arrows.
 eulervm -- Euler virtual math fonts.
 euxm --
 feyn -- A font for in-text Feynman diagrams.
 fge -- A font for Frege's Grundgesetze der Arithmetik.
 foekfont -- The title font of the Mads Fok magazine.
 fonetika -- Support for the danish "Dania" phonetic system.
 fourier -- Using Utopia fonts in LaTeX documents.
 fouriernc -- Use New Century Schoolbook text with Fourier maths fonts.
 frcursive -- French cursive hand fonts.
 garamond --
 genealogy -- A compilation genealogy font.
 gfsartemisia -- A modern Greek font design.
 gfsbodoni -- A Greek and Latin font based on Bodoni.
 gfscomplutum -- A Greek font with a long history.
 gfsdidot -- A Greek font based on Didot's work.
 gfsneohellenic -- A Greek font in the Neo-Hellenic style.
 gfssolomos -- A Greek-alphabet font.
 gothic -- A collection of old German-style fonts.
 greenpoint -- The Green Point logo.
 groff --
 grotesq -- the URW Grotesk Bold Font.
 hands -- Pointing hand font.
 hfbright -- The hfbright fonts.
 hfoldsty -- Old style numerals with EC fonts.
 ifsym -- A collection of symbols.
 inconsolata -- A monospaced font, with support files for use with TeX.
 initials -- Adobe Type 1 decorative initial fonts.
 iwona -- A two-element sans-serif font.
 junicode -- A TrueType font for mediaevalists.
 kixfont -- A font for KIX codes.
 knuthotherfonts --
 kpfonts -- A complete set of fonts for text and mathematics.
 kurier -- A two-element sans-serif typeface.
 lettrgth --
 lfb -- A Greek font with normal and bold variants.
 libertine -- Use the font Libertine with LaTeX.
 libris -- Libris ADF fonts, with LaTeX support.
 linearA -- Linear A script fonts.
 logic -- A font for electronic logic design.
 lxfonts -- Set of slide fonts based on CM.
 ly1 -- Support for LY1 LaTeX encoding.
 marigold --
 mathabx -- Three series of mathematical symbols.
 mathdesign -- Mathematical fonts to fit with particular text fonts.
 mnsymbol -- Mathematical symbol font for Adobe MinionPro.
 nkarta -- A "new" version of the karta cartographic fonts.
 ocherokee -- LaTeX Support for the Cherokee language.
 ogham -- Fonts for typesetting Ogham script.
 oinuit -- LaTeX Support for the Inuktitut Language.
 optima --
 orkhun -- A font for orkhun script.
 osmanian -- Osmanian font for writing Somali.
 pacioli -- Fonts designed by Fra Luca de Pacioli in 1497.
 pclnfss -- Font support for current PCL printers.
 phaistos -- Disk of Phaistos font.
 phonetic -- MetaFont Phonetic fonts, based on Computer Modern.
 pigpen -- A font for the pigpen (or masonic) cipher.
 psafm --
 punk -- Donald Knuth's punk font.
 recycle -- A font providing the "recyclable" logo.
 sauter -- Wide range of design sizes for CM fonts.
 sauterfonts -- Use sauter fonts in LaTeX.
 semaphor -- Semaphore alphabet font.
 simpsons -- MetaFont source for Simpsons characters.
 skull -- A font to draw a skull.
 staves -- Typeset Icelandic staves and runic letters.
 tapir -- A simple geometrical font.
 tengwarscript -- LaTeX support for using Tengwar fonts.
 trajan -- Fonts from the Trajan column in Rome.
 umtypewriter -- Fonts to typeset with the xgreek package.
 univers --
 universa -- Herbert Bayer's 'universal' font.
 venturisadf -- Venturis ADF fonts collection.
 wsuipa -- International Phonetic Alphabet fonts.
 yfonts -- Support for old German fonts.
 zefonts -- Virtual fonts to provide T1 encoding from existing fonts."""
    EXAMPLE4 = """Arista is a simple multimedia transcoder, it focuses on \
being easy to use by making complex task of encoding for various devices \
simple.
Users should pick an input and a target device, choose a file to save to and \
go. Features:
* Presets for iPod, computer, DVD player, PSP, Playstation 3, and more.
* Live preview to see encoded quality.
* Automatically discover available DVD media and Video 4 Linux (v4l) devices.
* Rip straight from DVD media easily (requires libdvdcss).
* Rip straight from v4l devices.
* Simple terminal client for scripting.
* Automatic preset updating."""
    def on_clicked(widget, desc_widget, descs):
        # cycle to the next (description, pkgname) example, wrapping around
        widget.position += 1
        if widget.position >= len(descs):
            widget.position = 0
        desc_widget.set_description(*descs[widget.position])
    # (description text, pkgname) pairs fed to set_description()
    descs = ((EXAMPLE0, ''),
             (EXAMPLE1, ''),
             (EXAMPLE2, ''),
             (EXAMPLE3, 'texlive-fonts-extra'),
             (EXAMPLE4, ''))
    win = Gtk.Window()
    win.set_default_size(300, 400)
    win.set_has_resize_grip(True)
    vb = Gtk.VBox()
    win.add(vb)
    b = Gtk.Button('Next test description >>')
    b.position = 0
    vb.pack_start(b, False, False, 0)
    scroll = Gtk.ScrolledWindow()
    vb.add(scroll)
    d = AppDescription()
    #~ d.description.DEBUG_PAINT_BBOXES = True
    d.set_description(EXAMPLE0, pkgname='')
    scroll.add_with_viewport(d)
    win.show_all()
    b.connect("clicked", on_clicked, d, descs)
    win.connect('destroy', lambda x: Gtk.main_quit())
    return win
# manual test entry point: show the demo window and run the Gtk main loop
if __name__ == '__main__':
    win = get_test_description_window()
    win.show_all()
    Gtk.main()
 | 
	gpl-3.0 | -5,847,206,272,166,974,000 | 32.071823 | 79 | 0.571584 | false | 
| 
	teddym6/qualitybots | 
	src/appengine/handlers/machine_pool.py | 
	26 | 
	5651 | 
	#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for assisting with the machine install process."""
# Disable 'Import not at top of file' lint error.
# pylint: disable-msg=C6204, C6205, W0611
import logging
from django.utils import simplejson
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from common import ec2_manager
from common import enum
from handlers import base
from handlers import launch_tasks
from models import client_machine
INIT_START = '/init/start'
INSTALL_FAILED = '/init/install_failed'
INSTALL_SUCEEDED = '/init/install_succeeded'
class InitializationStart(base.BaseHandler):
  """Handler to acknowledge a machine starting initialization."""
  # Disable 'Invalid method name' lint error.
  # pylint: disable-msg=C6409
  def get(self):
    """Updates the status of a machine starting initialization.

    Looks up the ClientMachine by its instance id and moves it from
    PROVISIONED to INITIALIZING; responds 500 when no machine matches.
    """
    instance_id = self.GetRequiredParameter('instance_id')
    instance = db.GqlQuery('SELECT * FROM ClientMachine WHERE client_id = :1',
                           instance_id).get()
    if not instance:
      logging.error('The given instance id "%s" does not match any machines.',
                    instance_id)
      self.error(500)
      return
    # An unexpected prior state is logged but the update proceeds anyway
    if instance.status != enum.MACHINE_STATUS.PROVISIONED:
      logging.error('The machine with instance id "%s" was in an unexpected '
                    'state for initialization: "%s"', instance_id,
                    enum.MACHINE_STATUS.LookupKey(instance.status))
    instance.status = enum.MACHINE_STATUS.INITIALIZING
    instance.put()
    self.response.out.write('Initialization acknowledged.')
class InstallFailed(base.BaseHandler):
  """Handler to deal with a machine that fails to properly setup and install."""
  # Disable 'Invalid method name' lint error.
  # pylint: disable-msg=C6409
  def post(self):
    """Updates the status of a machine that failed with initialization.

    Marks the machine FAILED, stores the client-supplied install log, and
    either reboots the instance for another attempt or terminates it once
    MAX_RETRIES has been reached (responding 500 in the latter case).
    """
    instance_id = self.GetRequiredParameter('instance_id')
    log = self.GetOptionalParameter('log', None)
    old_instance = db.GqlQuery(
        'SELECT * FROM ClientMachine WHERE client_id = :1',
        instance_id).get()
    if not old_instance:
      logging.error('The given instance id "%s" does not match any machines.',
                    instance_id)
      self.error(500)
      return
    # An unexpected prior state is logged but the failure is still recorded
    if old_instance.status != enum.MACHINE_STATUS.INITIALIZING:
      logging.error('The machine with instance id "%s" was in an unexpected '
                    'state for initialization: "%s"', instance_id,
                    enum.MACHINE_STATUS.LookupKey(old_instance.status))
    old_instance.status = enum.MACHINE_STATUS.FAILED
    if log:
      old_instance.initialization_log = log
    old_instance.put()
    if old_instance.retry_count >= client_machine.MAX_RETRIES:
      logging.error('Reached the maximum number of retries for starting this '
                    'machine: %s.', str(old_instance.key()))
      logging.info('Terminating the failed instance.')
      # terminate asynchronously via the deferred task queue
      deferred.defer(launch_tasks.TerminateFailedMachine, instance_id,
                     _countdown=launch_tasks.DEFAULT_COUNTDOWN,
                     _queue=launch_tasks.DEFAULT_QUEUE)
      self.error(500)
      return
    logging.info('Rebooting the failed instance.')
    # retries remain: schedule a reboot for another install attempt
    deferred.defer(launch_tasks.RebootMachine, instance_id,
                   _countdown=launch_tasks.DEFAULT_COUNTDOWN,
                   _queue=launch_tasks.DEFAULT_QUEUE)
    self.response.out.write('Initialization failure acknowledged.')
class InstallSucceeded(base.BaseHandler):
  """Handler to deal with a machine that installs successfully."""
  # Disable 'Invalid method name' lint error.
  # pylint: disable-msg=C6409
  def post(self):
    """Marks an INITIALIZING machine as RUNNING after a successful install."""
    instance_id = self.GetRequiredParameter('instance_id')
    log = self.GetOptionalParameter('log', None)
    query = db.GqlQuery(
        'SELECT * FROM ClientMachine WHERE client_id = :1', instance_id)
    instance = query.get()
    if not instance:
      logging.error('The given instance id "%s" does not match any machines.',
                    instance_id)
      self.error(500)
      return
    # An unexpected prior state is logged but the update proceeds anyway.
    if instance.status != enum.MACHINE_STATUS.INITIALIZING:
      logging.error('The machine with instance id "%s" was in an unexpected '
                    'state for initialization: "%s"', instance_id,
                    enum.MACHINE_STATUS.LookupKey(instance.status))
    instance.status = enum.MACHINE_STATUS.RUNNING
    if log:
      instance.initialization_log = log
    instance.put()
    self.response.out.write('Initialization success acknowledged.')
# WSGI routing: map the machine-install lifecycle URLs to their handlers.
application = webapp.WSGIApplication(
    [(INIT_START, InitializationStart),
     (INSTALL_FAILED, InstallFailed),
     (INSTALL_SUCEEDED, InstallSucceeded)],
    debug=True)
def main():
  """Run the WSGI application."""
  run_wsgi_app(application)
if __name__ == '__main__':
  main()
 | 
	apache-2.0 | 5,529,648,068,327,806,000 | 32.636905 | 80 | 0.682357 | false | 
| 
	aspiers/pacemaker | 
	cts/CM_ais.py | 
	15 | 
	5946 | 
	'''CTS: Cluster Testing System: AIS dependent modules...
'''
__copyright__ = '''
Copyright (C) 2007 Andrew Beekhof <[email protected]>
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
from cts.CTSvars import *
from cts.CM_lha  import crm_lha
from cts.CTS     import Process
from cts.patterns    import PatternSelector
#######################################################################
#
#  LinuxHA v2 dependent modules
#
#######################################################################
class crm_ais(crm_lha):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of openais
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-ais"
        crm_lha.__init__(self, Environment, randseed=randseed, name=name)
        # component-name -> Process cache, built lazily by ais_components()
        self.fullcomplist = {}
        self.templates = PatternSelector(self.name)
    def NodeUUID(self, node):
        # AIS-based clusters use the node name itself as its UUID
        return node
    def ais_components(self, extra=None):
        '''Return the list of cluster-daemon Process objects to test.

        extra: optional dict of component-name -> Process merged into (and
        possibly replacing entries of) the standard set the first time the
        component list is built.  The default is None rather than a mutable
        ``{}`` literal so the default object can never be shared or mutated
        across calls.
        '''
        if extra is None:
            extra = {}
        complist = []
        # build the full component map only once, then filter it per call
        if not self.fullcomplist:
            for c in ["cib", "lrmd", "crmd", "attrd"]:
                self.fullcomplist[c] = Process(
                    self, c,
                    pats = self.templates.get_component(self.name, c),
                    badnews_ignore = self.templates.get_component(self.name, "%s-ignore" % c),
                    common_ignore = self.templates.get_component(self.name, "common-ignore"))
            # pengine uses dc_pats instead of pats
            self.fullcomplist["pengine"] = Process(
                self, "pengine",
                dc_pats = self.templates.get_component(self.name, "pengine"),
                badnews_ignore = self.templates.get_component(self.name, "pengine-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore"))
            # stonith-ng's process name is different from its component name
            self.fullcomplist["stonith-ng"] = Process(
                self, "stonith-ng", process="stonithd",
                pats = self.templates.get_component(self.name, "stonith"),
                badnews_ignore = self.templates.get_component(self.name, "stonith-ignore"),
                common_ignore = self.templates.get_component(self.name, "common-ignore"))
            # add (or replace) any extra components passed in
            self.fullcomplist.update(extra)
        # Processes running under valgrind can't be shot with "killall -9 processname",
        # so don't include them in the returned list
        vgrind = self.Env["valgrind-procs"].split()
        for key in list(self.fullcomplist.keys()):
            if self.Env["valgrind-tests"]:
                if key in vgrind:
                    self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
                    continue
            if key == "stonith-ng" and not self.Env["DoFencing"]:
                continue
            complist.append(self.fullcomplist[key])
        return complist
class crm_cs_v0(crm_ais):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running against version 0 of our plugin
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-plugin-v0"
        crm_ais.__init__(self, Environment, randseed=randseed, name=name)
    def Components(self):
        # Same component set as crm_ais, plus the corosync daemon itself
        extra = {}
        extra["corosync"] = Process(
            self, "corosync", 
            pats = self.templates.get_component(self.name, "corosync"),
            badnews_ignore = self.templates.get_component(self.name, "corosync-ignore"),
            common_ignore = self.templates.get_component(self.name, "common-ignore")
        )
        return self.ais_components(extra=extra)
class crm_cs_v1(crm_cs_v0):
    '''
    Cluster manager class for crm on version 1 of the corosync plugin;
    behaviour is inherited wholesale from crm_cs_v0, only the default
    name differs.
    '''
    def __init__(self, Environment, randseed=None, name=None):
        crm_cs_v0.__init__(self, Environment, randseed=randseed,
                           name=name or "crm-plugin-v1")
class crm_mcp(crm_cs_v0):
    '''
    The crm version 4 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of native corosync (no plugins)
    '''
    def __init__(self, Environment, randseed=None, name=None):
        if not name: name="crm-mcp"
        crm_cs_v0.__init__(self, Environment, randseed=randseed, name=name)
        if self.Env["have_systemd"]:
            self.update({
                # When systemd is in use, we can look for this instead
                # NOTE(review): the leading "%s" appears to be substituted
                # later by the pattern machinery -- confirm before changing.
                "Pat:We_stopped"   : "%s.*Corosync Cluster Engine exiting normally",
            })
class crm_cman(crm_cs_v0):
    '''
    Cluster manager class for crm clusters running on the cman stack;
    behaviour is inherited from crm_cs_v0.
    '''
    def __init__(self, Environment, randseed=None, name=None):
        crm_cs_v0.__init__(self, Environment, randseed=randseed,
                           name=name or "crm-cman")
 | 
	gpl-2.0 | 2,421,423,930,139,020,000 | 37.61039 | 110 | 0.61554 | false | 
| 
	jlguardi/yowsup | 
	yowsup/layers/protocol_media/protocolentities/builder_message_media_downloadable.py | 
	17 | 
	1886 | 
	# from yowsup.layers.protocol_media import mediacipher
import tempfile
import os
class DownloadableMediaMessageBuilder(object):
    """Accumulates the attributes needed to construct a downloadable media
    message entity, then builds it via the entity class's fromBuilder()."""

    def __init__(self, downloadbleMediaMessageClass, jid, filepath):
        self.cls = downloadbleMediaMessageClass
        self.jid = jid
        self.filepath = filepath
        self.encryptedFilepath = None
        self.mediaKey = None
        self.attributes = {}
        # Derive the media kind ("image", "video", ...) from the entity
        # class name, e.g. ImageDownloadableMediaMessageProtocolEntity.
        marker = "DownloadableMediaMessageProtocolEntity"
        self.mediaType = self.cls.__name__.split(marker)[0].lower()

    def setEncryptionData(self, mediaKey, encryptedFilepath):
        """Record the media key and the path of the encrypted copy."""
        self.mediaKey = mediaKey
        self.encryptedFilepath = encryptedFilepath

    def isEncrypted(self):
        return self.encryptedFilepath is not None

    def getFilepath(self):
        # Prefer the encrypted copy when one has been recorded.
        return self.filepath if not self.encryptedFilepath else self.encryptedFilepath

    def getOriginalFilepath(self):
        return self.filepath

    def set(self, key, val):
        self.attributes[key] = val

    def get(self, key, default = None):
        # A stored None counts as "unset" and yields the default.
        value = self.attributes.get(key)
        return default if value is None else value

    def getOrSet(self, key, func):
        """Lazily populate `key` with func() when it has no truthy value."""
        if not self.get(key):
            self.set(key, func())

    def build(self, url = None, ip = None):
        """Apply optional url/ip and delegate construction to the entity class."""
        for key, val in (("url", url), ("ip", ip)):
            if val:
                self.set(key, val)
        return self.cls.fromBuilder(self)
 | 
	gpl-3.0 | 8,093,133,399,139,990,000 | 29.918033 | 109 | 0.627253 | false | 
| 
	damorim/compilers-cin | 
	2020_3/projeto2/antlr4-python3-runtime-4.7.2/src/antlr4/atn/ATNDeserializer.py | 
	9 | 
	22186 | 
	# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
from uuid import UUID
from io import StringIO
from typing import Callable
from antlr4.Token import Token
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNType import ATNType
from antlr4.atn.ATNState import *
from antlr4.atn.Transition import *
from antlr4.atn.LexerAction import *
from antlr4.atn.ATNDeserializationOptions import ATNDeserializationOptions
# This is the earliest supported serialized UUID.
BASE_SERIALIZED_UUID = UUID("AADB8D7E-AEEF-4415-AD2B-8204D6CF042E")
# This UUID indicates the serialized ATN contains two sets of
# IntervalSets, where the second set's values are encoded as
# 32-bit integers to support the full Unicode SMP range up to U+10FFFF.
ADDED_UNICODE_SMP = UUID("59627784-3BE5-417A-B9EB-8131A7286089")
# This list contains all of the currently supported UUIDs, ordered by when
# the feature first appeared in this branch.
SUPPORTED_UUIDS = [ BASE_SERIALIZED_UUID, ADDED_UNICODE_SMP ]
# Expected value of the version number at the start of a serialized ATN.
SERIALIZED_VERSION = 3
# This is the current serialized UUID.
SERIALIZED_UUID = ADDED_UNICODE_SMP
class ATNDeserializer (object):
    """Rebuilds an ATN (augmented transition network) from the serialized
    string form produced by the ANTLR tool.

    Fixes over the previous revision:
    - the factory dispatchers used ``type > len(factories)`` so an index equal
      to the list length raised IndexError instead of the intended Exception;
    - ``generateRuleBypassTransition`` looped on a fixed ``count`` that never
      shrank, indexing past the shrinking transition list;
    - ``isFeatureSupported`` had a dead ``idx1 < 0`` guard (list.index raises
      ValueError rather than returning -1).
    """

    def __init__(self, options : ATNDeserializationOptions = None):
        if options is None:
            options = ATNDeserializationOptions.defaultOptions
        self.deserializationOptions = options

    # Determines if a particular serialized representation of an ATN supports
    # a particular feature, identified by the {@link UUID} used for serializing
    # the ATN at the time the feature was first introduced.
    #
    # @param feature The {@link UUID} marking the first time the feature was
    # supported in the serialized ATN.
    # @param actualUuid The {@link UUID} of the actual serialized ATN which is
    # currently being deserialized.
    # @return {@code true} if the {@code actualUuid} value represents a
    # serialized ATN at or after the feature identified by {@code feature} was
    # introduced; otherwise, {@code false}.
    def isFeatureSupported(self, feature : UUID , actualUuid : UUID ):
        # list.index() raises ValueError on a miss, so test membership first;
        # the previous "idx1 < 0" check could never trigger.
        if feature not in SUPPORTED_UUIDS or actualUuid not in SUPPORTED_UUIDS:
            return False
        return SUPPORTED_UUIDS.index(actualUuid) >= SUPPORTED_UUIDS.index(feature)

    def deserialize(self, data : str):
        """Decode the serialized ATN in `data` and return the ATN object."""
        self.reset(data)
        self.checkVersion()
        self.checkUUID()
        atn = self.readATN()
        self.readStates(atn)
        self.readRules(atn)
        self.readModes(atn)
        sets = []
        # First, read all sets with 16-bit Unicode code points <= U+FFFF.
        self.readSets(atn, sets, self.readInt)
        # Next, if the ATN was serialized with the Unicode SMP feature,
        # deserialize sets with 32-bit arguments <= U+10FFFF.
        if self.isFeatureSupported(ADDED_UNICODE_SMP, self.uuid):
            self.readSets(atn, sets, self.readInt32)
        self.readEdges(atn, sets)
        self.readDecisions(atn)
        self.readLexerActions(atn)
        self.markPrecedenceDecisions(atn)
        self.verifyATN(atn)
        if self.deserializationOptions.generateRuleBypassTransitions \
                and atn.grammarType == ATNType.PARSER:
            self.generateRuleBypassTransitions(atn)
            # re-verify after modification
            self.verifyATN(atn)
        return atn

    def reset(self, data:str):
        """Load `data` into self.data, undoing the serializer's +2 offset."""
        def adjust(c):
            v = ord(c)
            return v-2 if v>1 else v + 65533
        temp = [ adjust(c) for c in data ]
        # don't adjust the first value since that's the version number
        temp[0] = ord(data[0])
        self.data = temp
        self.pos = 0

    def checkVersion(self):
        version = self.readInt()
        if version != SERIALIZED_VERSION:
            raise Exception("Could not deserialize ATN with version " + str(version) + " (expected " + str(SERIALIZED_VERSION) + ").")

    def checkUUID(self):
        uuid = self.readUUID()
        if not uuid in SUPPORTED_UUIDS:
            raise Exception("Could not deserialize ATN with UUID: " + str(uuid) + \
                            " (expected " + str(SERIALIZED_UUID) + " or a legacy UUID).", uuid, SERIALIZED_UUID)
        self.uuid = uuid

    def readATN(self):
        idx = self.readInt()
        grammarType = ATNType.fromOrdinal(idx)
        maxTokenType = self.readInt()
        return ATN(grammarType, maxTokenType)

    def readStates(self, atn:ATN):
        """Create all ATN states, then wire up loop-back/end state links."""
        loopBackStateNumbers = []
        endStateNumbers = []
        nstates = self.readInt()
        for i in range(0, nstates):
            stype = self.readInt()
            # ignore bad type of states
            if stype==ATNState.INVALID_TYPE:
                atn.addState(None)
                continue
            ruleIndex = self.readInt()
            if ruleIndex == 0xFFFF:
                ruleIndex = -1
            s = self.stateFactory(stype, ruleIndex)
            if stype == ATNState.LOOP_END: # special case
                loopBackStateNumber = self.readInt()
                loopBackStateNumbers.append((s, loopBackStateNumber))
            elif isinstance(s, BlockStartState):
                endStateNumber = self.readInt()
                endStateNumbers.append((s, endStateNumber))
            atn.addState(s)
        # delay the assignment of loop back and end states until we know all the state instances have been initialized
        for pair in loopBackStateNumbers:
            pair[0].loopBackState = atn.states[pair[1]]
        for pair in endStateNumbers:
            pair[0].endState = atn.states[pair[1]]
        numNonGreedyStates = self.readInt()
        for i in range(0, numNonGreedyStates):
            stateNumber = self.readInt()
            atn.states[stateNumber].nonGreedy = True
        numPrecedenceStates = self.readInt()
        for i in range(0, numPrecedenceStates):
            stateNumber = self.readInt()
            atn.states[stateNumber].isPrecedenceRule = True

    def readRules(self, atn:ATN):
        """Populate rule start/stop state tables (and token types for lexers)."""
        nrules = self.readInt()
        if atn.grammarType == ATNType.LEXER:
            atn.ruleToTokenType = [0] * nrules
        atn.ruleToStartState = [0] * nrules
        for i in range(0, nrules):
            s = self.readInt()
            startState = atn.states[s]
            atn.ruleToStartState[i] = startState
            if atn.grammarType == ATNType.LEXER:
                tokenType = self.readInt()
                if tokenType == 0xFFFF:
                    tokenType = Token.EOF
                atn.ruleToTokenType[i] = tokenType
        atn.ruleToStopState = [0] * nrules
        for state in atn.states:
            if not isinstance(state, RuleStopState):
                continue
            atn.ruleToStopState[state.ruleIndex] = state
            atn.ruleToStartState[state.ruleIndex].stopState = state

    def readModes(self, atn:ATN):
        nmodes = self.readInt()
        for i in range(0, nmodes):
            s = self.readInt()
            atn.modeToStartState.append(atn.states[s])

    def readSets(self, atn:ATN, sets:list, readUnicode:Callable[[], int]):
        """Append IntervalSets to `sets`, reading code points via `readUnicode`."""
        m = self.readInt()
        for i in range(0, m):
            iset = IntervalSet()
            sets.append(iset)
            n = self.readInt()
            containsEof = self.readInt()
            if containsEof!=0:
                iset.addOne(-1)
            for j in range(0, n):
                i1 = readUnicode()
                i2 = readUnicode()
                iset.addRange(range(i1, i2 + 1)) # range upper limit is exclusive
    def readEdges(self, atn:ATN, sets:list):
        """Read all transitions, then derive the implicit rule-stop edges."""
        nedges = self.readInt()
        for i in range(0, nedges):
            src = self.readInt()
            trg = self.readInt()
            ttype = self.readInt()
            arg1 = self.readInt()
            arg2 = self.readInt()
            arg3 = self.readInt()
            trans = self.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
            srcState = atn.states[src]
            srcState.addTransition(trans)
        # edges for rule stop states can be derived, so they aren't serialized
        for state in atn.states:
            for i in range(0, len(state.transitions)):
                t = state.transitions[i]
                if not isinstance(t, RuleTransition):
                    continue
                outermostPrecedenceReturn = -1
                if atn.ruleToStartState[t.target.ruleIndex].isPrecedenceRule:
                    if t.precedence == 0:
                        outermostPrecedenceReturn = t.target.ruleIndex
                trans = EpsilonTransition(t.followState, outermostPrecedenceReturn)
                atn.ruleToStopState[t.target.ruleIndex].addTransition(trans)
        for state in atn.states:
            if isinstance(state, BlockStartState):
                # we need to know the end state to set its start state
                if state.endState is None:
                    raise Exception("IllegalState")
                # block end states can only be associated to a single block start state
                if state.endState.startState is not None:
                    raise Exception("IllegalState")
                state.endState.startState = state
            if isinstance(state, PlusLoopbackState):
                for i in range(0, len(state.transitions)):
                    target = state.transitions[i].target
                    if isinstance(target, PlusBlockStartState):
                        target.loopBackState = state
            elif isinstance(state, StarLoopbackState):
                for i in range(0, len(state.transitions)):
                    target = state.transitions[i].target
                    if isinstance(target, StarLoopEntryState):
                        target.loopBackState = state

    def readDecisions(self, atn:ATN):
        ndecisions = self.readInt()
        for i in range(0, ndecisions):
            s = self.readInt()
            decState = atn.states[s]
            atn.decisionToState.append(decState)
            decState.decision = i

    def readLexerActions(self, atn:ATN):
        if atn.grammarType == ATNType.LEXER:
            count = self.readInt()
            atn.lexerActions = [ None ] * count
            for i in range(0, count):
                actionType = self.readInt()
                data1 = self.readInt()
                if data1 == 0xFFFF:
                    data1 = -1
                data2 = self.readInt()
                if data2 == 0xFFFF:
                    data2 = -1
                lexerAction = self.lexerActionFactory(actionType, data1, data2)
                atn.lexerActions[i] = lexerAction

    def generateRuleBypassTransitions(self, atn:ATN):
        count = len(atn.ruleToStartState)
        atn.ruleToTokenType = [ 0 ] * count
        for i in range(0, count):
            atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
        for i in range(0, count):
            self.generateRuleBypassTransition(atn, i)

    def generateRuleBypassTransition(self, atn:ATN, idx:int):
        bypassStart = BasicBlockStartState()
        bypassStart.ruleIndex = idx
        atn.addState(bypassStart)
        bypassStop = BlockEndState()
        bypassStop.ruleIndex = idx
        atn.addState(bypassStop)
        bypassStart.endState = bypassStop
        atn.defineDecisionState(bypassStart)
        bypassStop.startState = bypassStart
        excludeTransition = None
        if atn.ruleToStartState[idx].isPrecedenceRule:
            # wrap from the beginning of the rule to the StarLoopEntryState
            endState = None
            for state in atn.states:
                if self.stateIsEndStateFor(state, idx):
                    endState = state
                    excludeTransition = state.loopBackState.transitions[0]
                    break
            if excludeTransition is None:
                raise Exception("Couldn't identify final state of the precedence rule prefix section.")
        else:
            endState = atn.ruleToStopState[idx]
        # all non-excluded transitions that currently target end state need to target blockEnd instead
        for state in atn.states:
            for transition in state.transitions:
                if transition == excludeTransition:
                    continue
                if transition.target == endState:
                    transition.target = bypassStop
        # all transitions leaving the rule start state need to leave blockStart instead.
        # Fixed: pop from the end until empty; the previous fixed-"count" loop
        # never shrank its bound and indexed past the shrinking list when the
        # start state had more than one transition.
        ruleToStartState = atn.ruleToStartState[idx]
        while len(ruleToStartState.transitions) > 0:
            bypassStart.addTransition(ruleToStartState.transitions[-1])
            del ruleToStartState.transitions[-1]
        # link the new states
        atn.ruleToStartState[idx].addTransition(EpsilonTransition(bypassStart))
        bypassStop.addTransition(EpsilonTransition(endState))
        matchState = BasicState()
        atn.addState(matchState)
        matchState.addTransition(AtomTransition(bypassStop, atn.ruleToTokenType[idx]))
        bypassStart.addTransition(EpsilonTransition(matchState))

    def stateIsEndStateFor(self, state:ATNState, idx:int):
        """Return `state` if it ends the precedence-rule prefix of rule `idx`,
        else None."""
        if state.ruleIndex != idx:
            return None
        if not isinstance(state, StarLoopEntryState):
            return None
        maybeLoopEndState = state.transitions[len(state.transitions) - 1].target
        if not isinstance(maybeLoopEndState, LoopEndState):
            return None
        if maybeLoopEndState.epsilonOnlyTransitions and \
                isinstance(maybeLoopEndState.transitions[0].target, RuleStopState):
            return state
        else:
            return None

    #
    # Analyze the {@link StarLoopEntryState} states in the specified ATN to set
    # the {@link StarLoopEntryState#isPrecedenceDecision} field to the
    # correct value.
    #
    # @param atn The ATN.
    #
    def markPrecedenceDecisions(self, atn:ATN):
        for state in atn.states:
            if not isinstance(state, StarLoopEntryState):
                continue
            # We analyze the ATN to determine if this ATN decision state is the
            # decision for the closure block that determines whether a
            # precedence rule should continue or complete.
            #
            if atn.ruleToStartState[state.ruleIndex].isPrecedenceRule:
                maybeLoopEndState = state.transitions[len(state.transitions) - 1].target
                if isinstance(maybeLoopEndState, LoopEndState):
                    if maybeLoopEndState.epsilonOnlyTransitions and \
                            isinstance(maybeLoopEndState.transitions[0].target, RuleStopState):
                        state.isPrecedenceDecision = True

    def verifyATN(self, atn:ATN):
        """Sanity-check structural invariants of the deserialized ATN."""
        if not self.deserializationOptions.verifyATN:
            return
        # verify assumptions
        for state in atn.states:
            if state is None:
                continue
            self.checkCondition(state.epsilonOnlyTransitions or len(state.transitions) <= 1)
            if isinstance(state, PlusBlockStartState):
                self.checkCondition(state.loopBackState is not None)
            if isinstance(state, StarLoopEntryState):
                self.checkCondition(state.loopBackState is not None)
                self.checkCondition(len(state.transitions) == 2)
                if isinstance(state.transitions[0].target, StarBlockStartState):
                    self.checkCondition(isinstance(state.transitions[1].target, LoopEndState))
                    self.checkCondition(not state.nonGreedy)
                elif isinstance(state.transitions[0].target, LoopEndState):
                    self.checkCondition(isinstance(state.transitions[1].target, StarBlockStartState))
                    self.checkCondition(state.nonGreedy)
                else:
                    raise Exception("IllegalState")
            if isinstance(state, StarLoopbackState):
                self.checkCondition(len(state.transitions) == 1)
                self.checkCondition(isinstance(state.transitions[0].target, StarLoopEntryState))
            if isinstance(state, LoopEndState):
                self.checkCondition(state.loopBackState is not None)
            if isinstance(state, RuleStartState):
                self.checkCondition(state.stopState is not None)
            if isinstance(state, BlockStartState):
                self.checkCondition(state.endState is not None)
            if isinstance(state, BlockEndState):
                self.checkCondition(state.startState is not None)
            if isinstance(state, DecisionState):
                self.checkCondition(len(state.transitions) <= 1 or state.decision >= 0)
            else:
                self.checkCondition(len(state.transitions) <= 1 or isinstance(state, RuleStopState))

    def checkCondition(self, condition:bool, message=None):
        if not condition:
            if message is None:
                message = "IllegalState"
            raise Exception(message)

    def readInt(self):
        i = self.data[self.pos]
        self.pos += 1
        return i

    def readInt32(self):
        low = self.readInt()
        high = self.readInt()
        return low | (high << 16)

    def readLong(self):
        low = self.readInt32()
        high = self.readInt32()
        return (low & 0x00000000FFFFFFFF) | (high << 32)

    def readUUID(self):
        low = self.readLong()
        high = self.readLong()
        allBits = (low & 0xFFFFFFFFFFFFFFFF) | (high << 64)
        return UUID(int=allBits)

    # Indexed by serialized transition type; entry 0 is the INVALID slot.
    edgeFactories = [ lambda args : None,
                      lambda atn, src, trg, arg1, arg2, arg3, sets, target : EpsilonTransition(target),
                      lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
                        RangeTransition(target, Token.EOF, arg2) if arg3 != 0 else RangeTransition(target, arg1, arg2),
                      lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
                        RuleTransition(atn.states[arg1], arg2, arg3, target),
                      lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
                        PredicateTransition(target, arg1, arg2, arg3 != 0),
                      lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
                        AtomTransition(target, Token.EOF) if arg3 != 0 else AtomTransition(target, arg1),
                      lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
                        ActionTransition(target, arg1, arg2, arg3 != 0),
                      lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
                        SetTransition(target, sets[arg1]),
                      lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
                        NotSetTransition(target, sets[arg1]),
                      lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
                        WildcardTransition(target),
                      lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
                        PrecedencePredicateTransition(target, arg1)
                      ]

    def edgeFactory(self, atn:ATN, type:int, src:int, trg:int, arg1:int, arg2:int, arg3:int, sets:list):
        """Build a Transition from its serialized fields."""
        target = atn.states[trg]
        # Fixed off-by-one: was "type > len(...)", which let type == len
        # fall through to an IndexError instead of this Exception.
        if type >= len(self.edgeFactories) or self.edgeFactories[type] is None:
            raise Exception("The specified transition type: " + str(type) + " is not valid.")
        else:
            return self.edgeFactories[type](atn, src, trg, arg1, arg2, arg3, sets, target)

    # Indexed by serialized state type; entry 0 is the INVALID slot.
    stateFactories = [  lambda : None,
                        lambda : BasicState(),
                        lambda : RuleStartState(),
                        lambda : BasicBlockStartState(),
                        lambda : PlusBlockStartState(),
                        lambda : StarBlockStartState(),
                        lambda : TokensStartState(),
                        lambda : RuleStopState(),
                        lambda : BlockEndState(),
                        lambda : StarLoopbackState(),
                        lambda : StarLoopEntryState(),
                        lambda : PlusLoopbackState(),
                        lambda : LoopEndState()
                    ]

    def stateFactory(self, type:int, ruleIndex:int):
        """Build an ATNState of the given serialized type for `ruleIndex`."""
        # Fixed off-by-one: was "type > len(...)" (see edgeFactory).
        if type >= len(self.stateFactories) or self.stateFactories[type] is None:
            raise Exception("The specified state type " + str(type) + " is not valid.")
        else:
            s = self.stateFactories[type]()
            if s is not None:
                s.ruleIndex = ruleIndex
        return s

    CHANNEL = 0     #The type of a {@link LexerChannelAction} action.
    CUSTOM = 1      #The type of a {@link LexerCustomAction} action.
    MODE = 2        #The type of a {@link LexerModeAction} action.
    MORE = 3        #The type of a {@link LexerMoreAction} action.
    POP_MODE = 4    #The type of a {@link LexerPopModeAction} action.
    PUSH_MODE = 5   #The type of a {@link LexerPushModeAction} action.
    SKIP = 6        #The type of a {@link LexerSkipAction} action.
    TYPE = 7        #The type of a {@link LexerTypeAction} action.

    actionFactories = [ lambda data1, data2: LexerChannelAction(data1),
                        lambda data1, data2: LexerCustomAction(data1, data2),
                        lambda data1, data2: LexerModeAction(data1),
                        lambda data1, data2: LexerMoreAction.INSTANCE,
                        lambda data1, data2: LexerPopModeAction.INSTANCE,
                        lambda data1, data2: LexerPushModeAction(data1),
                        lambda data1, data2: LexerSkipAction.INSTANCE,
                        lambda data1, data2: LexerTypeAction(data1)
                      ]

    def lexerActionFactory(self, type:int, data1:int, data2:int):
        """Build a LexerAction from its serialized type and arguments."""
        # Fixed off-by-one: was "type > len(...)" (see edgeFactory).
        if type >= len(self.actionFactories) or self.actionFactories[type] is None:
            raise Exception("The specified lexer action type " + str(type) + " is not valid.")
        else:
            return self.actionFactories[type](data1, data2)
 | 
	mit | -1,459,583,157,344,531,000 | 41.018939 | 134 | 0.594204 | false | 
| 
	ppiotr/Invenio | 
	modules/miscutil/lib/upgrades/invenio_2012_11_27_new_selfcite_tables.py | 
	24 | 
	1666 | 
	# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.dbquery import run_sql
# Upgrade-recipe ordering: requires the Invenio 1.1.0 release upgrade first.
depends_on = ['invenio_release_1_1_0']
def info():
    """Return the one-line human-readable description of this upgrade."""
    return "New selfcite tables"
def do_upgrade():
    """Create the self-citation ranking tables if they do not exist yet."""
    ddl_statements = (
        """
    CREATE TABLE IF NOT EXISTS `rnkRECORDSCACHE` (
      `id_bibrec` int(10) unsigned NOT NULL,
      `authorid` bigint(10) NOT NULL,
      PRIMARY KEY (`id_bibrec`,`authorid`)
    ) ENGINE=MyISAM""",
        """
    CREATE TABLE IF NOT EXISTS `rnkEXTENDEDAUTHORS` (
      `id` int(10) unsigned NOT NULL,
      `authorid` bigint(10) NOT NULL,
      PRIMARY KEY (`id`,`authorid`)
    ) ENGINE=MyISAM""",
        """
    CREATE TABLE IF NOT EXISTS `rnkSELFCITES` (
      `id_bibrec` int(10) unsigned NOT NULL,
      `count` int(10) unsigned NOT NULL,
      `references` text NOT NULL,
      `last_updated` datetime NOT NULL,
      PRIMARY KEY (`id_bibrec`)
    ) ENGINE=MyISAM""",
    )
    for statement in ddl_statements:
        run_sql(statement)
def estimate():
    """Return the estimated duration of this upgrade in seconds."""
    return 1
 | 
	gpl-2.0 | 241,791,155,171,128,740 | 31.038462 | 75 | 0.671068 | false | 
| 
	dvliman/jaikuengine | 
	.google_appengine/lib/django-0.96/django/newforms/extras/widgets.py | 
	32 | 
	2008 | 
	"""
Extra HTML Widget classes
"""
from django.newforms.widgets import Widget, Select
from django.utils.dates import MONTHS
import datetime
__all__ = ('SelectDateWidget',)
class SelectDateWidget(Widget):
    """
    A Widget that renders a date as three <select> boxes (month, day, year).

    It is also an example of a multi-element widget, hence the
    value_from_datadict override that reassembles the three POSTed values.
    """
    month_field = '%s_month'
    day_field = '%s_day'
    year_field = '%s_year'

    def __init__(self, attrs=None, years=None):
        # years is an optional list/tuple for the "year" select box;
        # by default the current year plus the following nine are offered.
        self.attrs = attrs or {}
        if years:
            self.years = years
        else:
            current_year = datetime.date.today().year
            self.years = range(current_year, current_year+10)

    def render(self, name, value, attrs=None):
        try:
            parsed = datetime.date(*map(int, value.split('-')))
            year_val, month_val, day_val = parsed.year, parsed.month, parsed.day
        except (AttributeError, TypeError, ValueError):
            # Not an ISO "YYYY-MM-DD" string: render with nothing selected.
            year_val = month_val = day_val = None
        month_choices = MONTHS.items()
        month_choices.sort()
        day_choices = [(d, d) for d in range(1, 32)]
        year_choices = [(y, y) for y in self.years]
        pieces = [
            Select(choices=month_choices).render(self.month_field % name, month_val),
            Select(choices=day_choices).render(self.day_field % name, day_val),
            Select(choices=year_choices).render(self.year_field % name, year_val),
        ]
        return u'\n'.join(pieces)

    def value_from_datadict(self, data, name):
        y = data.get(self.year_field % name)
        m = data.get(self.month_field % name)
        d = data.get(self.day_field % name)
        if y and m and d:
            return '%s-%s-%s' % (y, m, d)
        return None
 | 
	apache-2.0 | 8,298,886,374,793,652,000 | 33.033898 | 118 | 0.609562 | false | 
| 
	alextruberg/custom_django | 
	django/contrib/gis/db/backends/mysql/introspection.py | 
	624 | 
	1426 | 
	from MySQLdb.constants import FIELD_TYPE
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection
class MySQLIntrospection(DatabaseIntrospection):
    """Database introspection for MySQL spatial backends: maps MySQL
    GEOMETRY columns onto GeoDjango field types."""

    # Updating the data_types_reverse dictionary with the appropriate
    # type for Geometry fields.
    data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
    data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'

    def get_geometry_type(self, table_name, geo_col):
        """
        Return a (field_type, field_params) tuple for the geometry column
        `geo_col` of `table_name`.

        Raises Exception if no such column exists; previously this path fell
        through and surfaced as an UnboundLocalError on `field_type`.
        """
        field_type = None
        field_params = {}
        cursor = self.connection.cursor()
        try:
            # In order to get the specific geometry type of the field,
            # we introspect on the table definition using `DESCRIBE`.
            cursor.execute('DESCRIBE %s' %
                           self.connection.ops.quote_name(table_name))
            # Increment over description info until we get to the geometry
            # column.
            for column, typ, null, key, default, extra in cursor.fetchall():
                if column == geo_col:
                    # Using OGRGeomType to convert from OGC name to Django field.
                    # MySQL does not support 3D or SRIDs, so the field params
                    # are empty.
                    field_type = OGRGeomType(typ).django
                    field_params = {}
                    break
        finally:
            cursor.close()
        if field_type is None:
            raise Exception('Could not find a geometry column "%s" in table "%s".'
                            % (geo_col, table_name))
        return field_type, field_params
 | 
	bsd-3-clause | -8,190,172,623,721,746,000 | 43.5625 | 81 | 0.620617 | false | 
| 
	desarrollosimagos/svidb | 
	administrativo/perfil/models.py | 
	1 | 
	12346 | 
	#!/usr/bin/python -u
# -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
from django.contrib.auth.models import User
from mapas.models import *
from actores.models import *
class PerfilPublico(models.Model):
    """Public profile: links a Django auth user to a directory person record."""
    # One-to-one on both sides: a user has at most one profile and a person
    # belongs to at most one user.
    user = models.OneToOneField(User,verbose_name='Usuario')
    persona = models.OneToOneField(Directorios)
    class Meta:
        db_table = u'perfilpublico'
        verbose_name_plural='Perfil Público'
        verbose_name='Perfil Público'
        # NOTE(review): unique_together is redundant here since both fields
        # are already OneToOne (each is individually unique).
        unique_together=('user','persona')
        #app_label = 'Sistematizacion_de_modulos_publicos'
    def __unicode__(self):
        # Show the linked person's name in the admin and shell.
        return u"%s" %(self.persona.nombre)
		
class SeccionesPanelPublico(models.Model):
    """A section ("panel") that groups modules in the public panel UI."""
    panel = models.CharField(max_length=180,verbose_name='Modulo')
    descripcion = models.TextField()
#    modulos = models.ManyToManyField(ModulosPublicos,related_name='Modulos Principales',verbose_name='Modulos',blank=True)
    # Visibility flags and ordering within the panel listing.
    # NOTE(review): "is_admmin" looks like a typo for "is_admin"; renaming
    # would require a schema migration, so it is documented rather than fixed.
    activo = models.BooleanField(verbose_name="Activo")
    is_admmin = models.BooleanField(verbose_name="Solo para Administradores")
    posicion = models.IntegerField(verbose_name="Posicion")
    class Meta:
        verbose_name_plural='Secciones del Panel Publico'
        verbose_name='Secciones del Panel Publico'
    def __unicode__(self):
        return u"%s" %(self.panel)
class ModulosPublicos(models.Model):
    """A top-level public module rendered inside a panel section."""
    paneles = models.ForeignKey(SeccionesPanelPublico)
    modulo = models.CharField(max_length=180,verbose_name='Modulo')
    url = models.CharField(max_length=180,verbose_name='URL',blank=True,null=True)
    boton = models.ImageField(upload_to='modulos')
#    submodulos = models.ManyToManyField(SubModulosPublicos,related_name='Submodulos',verbose_name='Sub Modulos',blank=True)
    descripcion = models.TextField()
    is_admmin = models.BooleanField(verbose_name="Solo para Administradores")
    activo = models.BooleanField(verbose_name="Activo")
    posicion = models.IntegerField(verbose_name="Posicion")
    target = models.CharField(max_length=40,choices=(('_blank',u'Abre el documento vinculado en una nueva ventana o pestaña'),('_self',u'Abre el documento vinculado en el mismo marco que se ha hecho clic'),('_parent',u'Abre el documento vinculado en el marco padre'),('_top',u'Abre el documento vinculado en el pleno de la ventana')),verbose_name='Target del Vinculo')
    class Meta:
        verbose_name_plural='Módulos Públicos'
        verbose_name='Módulos Públicos'
        #app_label = 'Sistematizacion_de_modulos_publicos'
    def __unicode__(self):
        return u"%s - %s" %(self.paneles.panel, self.modulo)

    def logo(self):
        # Admin thumbnail: the uploaded button image when present, a
        # placeholder icon otherwise. (Removed a dead `logo = ""` local
        # that was assigned but never used.)
        if self.boton:
            esta = "<img src='" + self.boton.url +"' alt='Activo' height='150px'>"
        else:
            esta = "<img src='/media/imgs/icon-pendiente.gif' alt='Pendiente'> sin imagen"
        return u"%s"%(esta)
    logo.allow_tags = True
class SubModulosPublicos(models.Model):
    """A sub-module nested under a ModulosPublicos entry."""
    modulos = models.ForeignKey(ModulosPublicos)
    titulo = models.CharField(max_length=180,verbose_name='Modulo')
    url = models.CharField(max_length=180,verbose_name='URL',blank=True,null=True)
    boton = models.ImageField(upload_to='modulos')
    descripcion = models.TextField()
    is_admmin = models.BooleanField(verbose_name="Solo para Administradores")
    activo = models.BooleanField(verbose_name="Activo")
    posicion = models.IntegerField(verbose_name="Posicion")
    target = models.CharField(max_length=40,choices=(('_blank',u'Abre el documento vinculado en una nueva ventana o pestaña'),('_self',u'Abre el documento vinculado en el mismo marco que se ha hecho clic'),('_parent',u'Abre el documento vinculado en el marco padre'),('_top',u'Abre el documento vinculado en el pleno de la ventana')),verbose_name='Target del Vinculo')
    class Meta:
        verbose_name_plural='Sub Módulos Públicos'
        verbose_name='Sub Módulos Públicos'
    def __unicode__(self):
        return u"%s %s %s" %(self.modulos.paneles.panel, self.modulos.modulo,self.titulo)
    def logo(self):
        # Admin thumbnail: button image when present, placeholder otherwise.
        # (Removed a dead `logo = ""` local that was assigned but never used.)
        if self.boton:
            esta = "<img src='" + self.boton.url +"' alt='Activo' height='150px'>"
        else:
            esta = "<img src='/media/imgs/icon-pendiente.gif' alt='Pendiente'> sin imagen"
        return u"%s"%(esta)
    logo.allow_tags = True
class PerfilModulos(models.Model):
    # Per-profile permission flags (view/add/edit) for one top-level module.
    perfil = models.ForeignKey(PerfilPublico)
    modulos = models.ForeignKey(ModulosPublicos,verbose_name='Modulos')
    ver = models.BooleanField(verbose_name="Ver")
    add = models.BooleanField(verbose_name="Agregar")
    edit = models.BooleanField(verbose_name="Modificar")
    activo = models.BooleanField(verbose_name="Activo")
    class Meta:
        db_table = u'perfilmodulos'
        verbose_name_plural='Permisos Perfiles Módulos'
        unique_together=('perfil','modulos','activo')
        verbose_name='Permisos Perfiles Módulos'
        #app_label = 'Sistematizacion_de_modulos_publicos'
    def __unicode__(self):
        return u"%s %s" %(self.perfil.persona.nombre,self.modulos.modulo)
		
class PerfilSubModulos(models.Model):
    # Per-profile permission flags (view/add/edit) for one sub-module.
    perfil = models.ForeignKey(PerfilPublico)
    submodulos = models.ForeignKey(SubModulosPublicos,verbose_name='SubModulos')
    ver = models.BooleanField(verbose_name="Ver")
    add = models.BooleanField(verbose_name="Agregar")
    edit = models.BooleanField(verbose_name="Modificar")
    activo = models.BooleanField(verbose_name="Activo")
    class Meta:
        verbose_name_plural='Permisos Perfiles Sub Módulos'
        verbose_name='Permisos Perfil Sub Módulos'
        unique_together=('perfil','submodulos','activo')
        #app_label = 'Sistematizacion_de_modulos_publicos'
    def __unicode__(self):
        return u"%s %s" %(self.perfil.persona.nombre,self.submodulos.titulo)
		
#class PerfilPaneles(models.Model):
#    perfil = models.ForeignKey(PerfilPublico)
#    modulos = models.ManyToManyField(SeccionesPanelPublico,verbose_name='Paneles')
#    class Meta:
#        verbose_name_plural='Perfil Paneles'
#        verbose_name='Perfil Paneles'
#    def __unicode__(self):
#        return u"%s %s" %(self.perfil.persona.nombre,self.perfil.persona.documentoidentidad)
class TipoSolicitud(models.Model):
    # Lookup table for request types used by SistemaSolicitudes.
    tipo = models.CharField(max_length=180,verbose_name='Tipo')
    descripcion = models.TextField()
    class Meta:
        verbose_name_plural='Tipo de Solicitud'
        verbose_name='Tipo de Solicitud'
    def __unicode__(self):
        return u"%s" %(self.tipo)
		
		
class SistemaSolicitudes(models.Model):
    """A request/ticket: sender, recipients, involved entities, dates and status."""
    remi = models.ForeignKey(Directorios,verbose_name='Remitente')
    tipoSolicitud = models.ForeignKey(TipoSolicitud,verbose_name='Tipo de Solicitud',blank=True, null = True)
    destino = models.ManyToManyField(Directorios, related_name='destinodirect',verbose_name='Destinatarios',blank=True, null = True)
    destinoinst = models.ManyToManyField(Actores, related_name='destinoactor',verbose_name='Destinatarios Instituciones',blank=True, null = True)
    asunto = models.CharField(max_length=120,blank=True,null=True)
    mensaje = models.TextField(blank=True,null=True)
    # Pass the callable (not datetime.now()) so the timestamp is evaluated
    # on each save; calling it would freeze the default at import time.
    fecha = models.DateTimeField(default=datetime.now,editable = False)
    fechainicio = models.DateTimeField(verbose_name='Fecha de Inicio',blank=True,null=True)
    fechaentrega = models.DateTimeField(verbose_name='Fecha de Entrega',blank=True,null=True)
    fechaculminacion = models.DateTimeField(verbose_name='Fecha de Culminación',blank=True,null=True)
    fechaprorroga = models.DateTimeField(verbose_name='Prorroga',blank=True,null=True)
    proyect = models.BooleanField(verbose_name='Es Proyectable?')
    estrucorg = models.TextField(verbose_name='Recursos', blank=True, null=True)
    personasinvol = models.ManyToManyField(Directorios, related_name='persoinvol',verbose_name='Personas Involucradas',blank=True, null = True)
    personasinvoltext = models.TextField(verbose_name='Personas Involucradas, no registradas', blank=True, null=True)
    instituinvol = models.ManyToManyField(Actores, related_name='instiinvol',verbose_name='Instituciones Involucradas',blank=True, null = True)
    instituinvoltext = models.TextField(verbose_name='Institutos Involucrados, no registrados', blank=True, null=True)
    especies = models.ManyToManyField(Taxon, related_name='tax',verbose_name='Especies Involucradas',blank=True, null = True)
    especiestext = models.TextField(verbose_name='Especies Involucradas, no registradas', blank=True, null=True)
    areas = models.ManyToManyField(Areas, related_name='ar',verbose_name='Areas Involucradas',blank=True, null = True)
    areastext = models.TextField(verbose_name='Areas Involucradas, no registradas', blank=True, null=True)
    datos = models.FileField(upload_to='solicitudes',verbose_name='Datos Adjuntos',blank=True,null=True)
    prioridad = models.IntegerField(choices=((0,'Urgente'),(1,'Normal'),(2,'Especial')),verbose_name='Prioridad',null=True,blank=True)
    estatu = models.IntegerField(choices=((0,'Abierto'),(1,'Cerrado'),(2,'Pausado')),verbose_name='Estatus',null=True,blank=True,db_column='estatu_id')
    class Meta:
        verbose_name_plural='Sistema de Solicitudes'
        #app_label = 'Datos_Transversales' 
        verbose_name = 'Sistema de Solicitudes'
    def __unicode__(self):
        return u" %s %s"%(self.remi,self.estatu)
		
#    def VerEspecies(self):
#        try:
#           espe = Taxon.objects.get(detalletaxon=self)
#        except Taxon.DoesNotExist:
#           espe = None
#        return u"<a href='/manager/especies/taxon/%s'>Ver Taxon</a>"%(tax.id)
#    VerTaxon.allow_tags = True
class Seguimiento(models.Model):
    """Follow-up message attached to a request (SistemaSolicitudes)."""
    solicitud = models.ForeignKey(SistemaSolicitudes,verbose_name='Solicitud',blank=True, null = True)
    persona = models.ForeignKey(Directorios,verbose_name='Persona',blank=True, null = True,editable = False)
    mensaje = models.TextField()
    # Pass the callable so the timestamp is taken on each save, instead of
    # datetime.now() which would be evaluated once at import time.
    fecha = models.DateTimeField(default=datetime.now,editable = False)
    class Meta:
        verbose_name_plural='Seguimiento'
        verbose_name='Seguimiento'
    def __unicode__(self):
        return u"%s" %(self.solicitud)
class validaciones(models.Model):
    """Account validation/recovery/deletion codes issued to a public profile."""
    usuario = models.ForeignKey(PerfilPublico,verbose_name='Usuario')
    codigo = models.CharField(max_length=120)
    estatu = models.IntegerField(choices=((0,'Validacion'),(1,'Recuperacion'),(2,'Eliminacion')),verbose_name='Tipo',null=True,blank=True)
    # Pass the callable so the timestamp is taken on each save, instead of
    # datetime.now() which would be evaluated once at import time.
    fecha = models.DateTimeField(default=datetime.now,editable = False)
    estado = models.BooleanField(verbose_name="Activo")
    class Meta:
        verbose_name_plural='Validacion de Cuentas'
        #app_label = 'Datos_Transversales' 
        verbose_name = 'Validacion de Cuentas'
    def __unicode__(self):
        return u" %s %s"%(self.usuario,self.estatu)
		
class GruposPermisos(models.Model):
    # Named group of profile permissions; entries live in DetalleGruposPermisos.
    nombre = models.CharField(max_length=120)
    estado = models.BooleanField(verbose_name="Activo")
    class Meta:
        verbose_name_plural='Grupos de Permisos de Perfil'
        verbose_name = 'Grupos de Permisos de Perfil'
    def __unicode__(self):
        return u" %s %s"%(self.nombre,self.estado)
		
class DetalleGruposPermisos(models.Model):
    # One permission-group entry: a panel, optionally narrowed to a module
    # and sub-module via chained (dependent) dropdowns.
    grupo = models.ForeignKey(GruposPermisos,verbose_name='Grupo')
    seccion = models.ForeignKey(SeccionesPanelPublico,verbose_name='Panel')
    # NOTE(review): ChainedForeignKey is not imported explicitly here —
    # presumably provided by django-smart-selects through one of the
    # wildcard imports at the top of the file; verify.
    modulo = ChainedForeignKey(ModulosPublicos,chained_field="seccion",chained_model_field="paneles",show_all=False,auto_choose=True,verbose_name='Modulo',null=True,blank=True)
    #modulo = models.ForeignKey(ModulosPublicos,verbose_name='Modulo')
    submodulo = ChainedForeignKey(SubModulosPublicos,chained_field="modulo",chained_model_field="modulos",show_all=False,auto_choose=True,verbose_name='SubModulo',null=True,blank=True)
    #submodulo = models.ForeignKey(SubModulosPublicos,verbose_name='SubModulo')
    estado = models.BooleanField(verbose_name="Activo")
    class Meta:
        verbose_name_plural='Detalle Grupos de Permisos de Perfil'
        verbose_name = 'Detalle Grupos de Permisos de Perfil'
    def __unicode__(self):
        return u" %s %s"%(self.grupo,self.estado)
 | 
	gpl-3.0 | 7,892,154,210,437,557,000 | 52.074561 | 368 | 0.693649 | false | 
| 
	ocefpaf/compliance-checker | 
	compliance_checker/tests/test_cf.py | 
	2 | 
	105222 | 
	#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import os
import sqlite3
from itertools import chain
from tempfile import gettempdir
import numpy as np
import pytest
from netCDF4 import Dataset
from compliance_checker import cfutil
from compliance_checker.cf import (
    CF1_6Check,
    CF1_7Check,
    dimless_vertical_coordinates_1_6,
    dimless_vertical_coordinates_1_7,
)
from compliance_checker.cf.appendix_d import no_missing_terms
from compliance_checker.cf.util import (
    StandardNameTable,
    create_cached_data_dir,
    download_cf_standard_name_table,
    is_time_variable,
    is_vertical_coordinate,
    units_convertible,
    units_temporal,
)
from compliance_checker.suite import CheckSuite
from compliance_checker.tests import BaseTestCase
from compliance_checker.tests.helpers import MockTimeSeries, MockVariable, MockRaggedArrayRepr
from compliance_checker.tests.resources import STATIC_FILES
def get_results(results):
    """
    Summarize a result set.

    Returns a (scored, out_of, messages) tuple where ``scored`` / ``out_of``
    accumulate each result's value (a (scored, possible) tuple or a boolean)
    and ``messages`` concatenates every result's message list in order.
    """
    result_seq = list(results.values()) if isinstance(results, dict) else results
    scored = 0
    out_of = 0
    messages = []
    for result in result_seq:
        value = result.value
        if isinstance(value, tuple):
            scored += value[0]
            out_of += value[1]
        else:
            scored += int(value)
            out_of += 1
        messages.extend(result.msgs)
    return scored, out_of, messages
class TestCF1_6(BaseTestCase):
    def setUp(self):
        """Create a fresh CF1_6Check instance before each test."""
        self.cf = CF1_6Check()
    # --------------------------------------------------------------------------------
    # Helper Methods
    # --------------------------------------------------------------------------------
    def new_nc_file(self):
        """
        Create a writable temporary netCDF file scoped to the current test.

        The dataset handle and the file itself are released via addCleanup.
        """
        path = os.path.join(gettempdir(), "example.nc")
        if os.path.exists(path):
            raise IOError("File Exists: %s" % path)
        dataset = Dataset(path, "w")
        self.addCleanup(os.remove, path)
        self.addCleanup(dataset.close)
        return dataset
    def test_coord_data_vars(self):
        """Check that coordinate data variables are properly handled"""
        ds = MockTimeSeries()
        ds.createDimension("siglev", 20)
        temp = ds.createVariable(
            "temp",
            np.float64,
            dimensions=("time",),
            # The deprecated np.float alias was removed in NumPy 1.24; the
            # plain float literal carries the same oversized fill value.
            fill_value=99999999999999999999.0,
        )
        temp.coordinates = "sigma noexist"
        ds.createVariable("sigma", np.float64, dimensions=("siglev",))
        self.cf.setup(ds)
        # time is a NUG coordinate variable, sigma is not, but is referred to in
        # variables, so both should show up in cf_coord_data_vars.
        # noexist does not exist in the dataset's variables, so it is not
        # present in coord_data_vars
        self.assertEqual(self.cf.coord_data_vars, {"time", "sigma"})
    def load_dataset(self, nc_dataset):
        """
        Open the netCDF file at the given path read-only and register its
        close for cleanup. Raises ValueError when given a non-string path.
        """
        if not isinstance(nc_dataset, str):
            raise ValueError("nc_dataset should be a string")
        dataset = Dataset(nc_dataset, "r")
        self.addCleanup(dataset.close)
        return dataset
    # --------------------------------------------------------------------------------
    # Compliance Tests
    # --------------------------------------------------------------------------------
    def test_check_data_types(self):
        """
        Invoke check_data_types() and loop through all variables to check data
        types. Pertains to 2.2. The netCDF data types char, byte, short, int,
        float or real, and double are all acceptable. NetCDF4 allows string as
        data type, which is also acceptable.
        """
        # check default netCDF data types
        dataset = self.load_dataset(STATIC_FILES["rutgers"])
        result = self.cf.check_data_types(dataset)
        # full score == every variable uses an acceptable data type
        assert result.value[0] == result.value[1]
        # check if variables of type `string` is properly processed
        dataset = self.load_dataset(STATIC_FILES["string"])
        if dataset.file_format != "NETCDF4":
            raise RuntimeError(
                "netCDF file of wrong format (not netCDF4) was created for checking"
            )
        result = self.cf.check_data_types(dataset)
        assert result.value[0] == result.value[1]
        # check bad data types
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        result = self.cf.check_data_types(dataset)
        # TODO
        # the acdd_reformat_rebase branch has a new .nc file
        # which constructs the temp variable with an int64 dtype --
        # upon rebasing, this should work as expected
        # assert result.msgs[0] == u'The variable temp failed because the datatype is int64'
        # assert result.value == (6, 7)
    def test_check_child_attr_data_types(self):
        """
        Tests check_child_attr_data_types() to ensure the attributes specified in Section 2.5.1
        have a matching data type to their parent variables."""
        # create dataset using MockDataset (default constructor gives it time dimension)
        ds = MockTimeSeries()
        ds.createVariable(
            "temp", np.float64, dimensions=("time")
        )  # add variable "temp" with dimension "time"
        # check where no special data attrs are present, should result good
        result = self.cf.check_child_attr_data_types(
            ds
        )  # checks all special attrs for all variables
        self.assert_result_is_good(result)
        # delete the dataset and start over to create the variable with _FillValue at time of creation
        del ds
        ds = MockTimeSeries()
        ds.createVariable(
            "temp",
            np.float64,
            dimensions=("time",),
            # The deprecated np.float alias was removed in NumPy 1.24; the
            # plain float literal carries the same oversized fill value.
            fill_value=99999999999999999999.0,
        )
        # give temp _FillValue as a float, expect good result
        result = self.cf.check_child_attr_data_types(ds)
        self.assert_result_is_good(result)
        # give temp valid_range as an array of floats, all should check out
        ds.variables["temp"].setncattr("valid_range", np.array([35.0, 38.0]))
        result = self.cf.check_child_attr_data_types(ds)
        self.assert_result_is_good(result)
        # dimensions would probably not be time for platform,
        # but this makes for an easy sanity check against string-like
        # variables and attributes
        var = ds.createVariable("platform", "S1", dimensions=("time",), fill_value="")
        # this probably doesn't make much sense -- more for _FillValue,
        # but _FillVaue data type checks are done at variable creation time?
        # Can't set manually
        var.setncattr("valid_max", -999)
        result = self.cf.check_child_attr_data_types(ds)
        self.assert_result_is_bad(result)
        # str or bytes should work
        var.setncattr("valid_max", "@")
        result = self.cf.check_child_attr_data_types(ds)
        self.assert_result_is_good(result)
        var.setncattr("valid_max", b"@")
        result = self.cf.check_child_attr_data_types(ds)
        self.assert_result_is_good(result)
        # now give invalid integer for valid_min; above two should still check out, this one should fail
        ds.variables["temp"].setncattr("valid_min", 45)
        result = self.cf.check_child_attr_data_types(ds)
        self.assert_result_is_bad(result)
        # now give invalid string for valid_max
        ds.variables["temp"].setncattr("valid_max", "eighty")
        result = self.cf.check_child_attr_data_types(ds)
        self.assert_result_is_bad(result)
        # TODO for CF-1.7: actual_range, actual_min/max
    def test_appendix_a(self):
        """Verify Appendix A attribute-location checks flag misplaced attributes."""
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        # Ordinarily, options would be specified in the checker constructor, but
        # we set them manually here so we don't have to monkey patch `setUp`
        self.cf.options = {"enable_appendix_a_checks"}
        new_check = copy.deepcopy(self.cf)
        self.cf.setup(dataset)
        aa_results = self.cf.check_appendix_a(dataset)
        # flatten per-result message lists into one set for membership checks
        flat_messages = {msg for res in aa_results for msg in res.msgs}
        self.assertIn(
            '[Appendix A] Attribute "compress" should not be present in non-coordinate data (D) variable "temp". This attribute may only appear in coordinate data (C).',
            flat_messages,
        )
        self.assertIn("add_offset must be a numeric type", flat_messages)
        nc_obj = MockTimeSeries()
        nc_obj._FillValue = "-9999.00"
        new_check.setup(nc_obj)
        res2 = new_check.check_appendix_a(nc_obj)
        flat_messages = {msg for res in res2 for msg in res.msgs}
        self.assertIn(
            '[Appendix A] Attribute "_FillValue" should not be present in global (G) attributes. This attribute may only appear in coordinate data (C) and non-coordinate data (D).',
            flat_messages,
        )
    def test_naming_conventions(self):
        """
        Section 2.3 Naming Conventions
        Variable, dimension and attr names should begin with a letter and be composed of letters, digits, and underscores.
        """
        # a compliant dataset earns a perfect score
        dataset = self.load_dataset(STATIC_FILES["rutgers"])
        results = self.cf.check_naming_conventions(dataset)
        scored, out_of, messages = get_results(results)
        assert scored == out_of
        # both non-compliant fixtures fail in the same way: three results,
        # two of them below a perfect score
        for fixture in ("bad", "chap2"):
            dataset = self.load_dataset(STATIC_FILES[fixture])
            results = self.cf.check_naming_conventions(dataset)
            scored, out_of, messages = get_results(results)
            assert len(results) == 3
            assert scored < out_of
            assert len([r for r in results if r.value[0] < r.value[1]]) == 2
            assert all(r.name == u"§2.3 Naming Conventions" for r in results)
    def test_check_names_unique(self):
        """
        2.3 names should not be distinguished purely by case, i.e., if case is disregarded, no two names should be the same.
        """
        # every variable name in the compliant dataset is unique
        dataset = self.load_dataset(STATIC_FILES["rutgers"])
        result = self.cf.check_names_unique(dataset)
        total = len(dataset.variables)
        self.assertEqual(result.value, (total, total))
        # the "chap2" fixture contains a case-insensitive duplicate
        dataset = self.load_dataset(STATIC_FILES["chap2"])
        result = self.cf.check_names_unique(dataset)
        assert result.value == (6, 7)
        assert (
            result.msgs[0]
            == u"Variables are not case sensitive. Duplicate variables named: not_unique"
        )
    def test_check_dimension_names(self):
        """
        2.4 A variable may have any number of dimensions, including zero, and the dimensions must all have different names.
        """
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        result = self.cf.check_dimension_names(dataset)
        # one of the seven variables repeats a dimension name
        assert result.value == (6, 7)
        dataset = self.load_dataset(STATIC_FILES["chap2"])
        result = self.cf.check_dimension_names(dataset)
        assert result.msgs[0] == u"no_reason has two or more dimensions named time"
    def test_check_dimension_order(self):
        """
        2.4 If any or all of the dimensions of a variable have the interpretations of "date or time" (T), "height or depth" (Z),
        "latitude" (Y), or "longitude" (X) then we recommend, those dimensions to appear in the relative order T, then Z, then Y,
        then X in the CDL definition corresponding to the file. All other dimensions should, whenever possible, be placed to the
        left of the spatiotemporal dimensions.
        """
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        result = self.cf.check_dimension_order(dataset)
        # five of six variables order their dimensions correctly
        assert result.value == (5, 6)
        assert result.msgs[0] == (
            u"really_bad's spatio-temporal dimensions are not in the "
            "recommended order T, Z, Y, X and/or further dimensions are not "
            "located left of T, Z, Y, X. The dimensions (and their guessed "
            "types) are latitude (Y), power (U) (with U: other/unknown; L: "
            "unlimited)."
        )
        # fully compliant ordering yields a perfect score and no messages
        dataset = self.load_dataset(STATIC_FILES["dimension_order"])
        result = self.cf.check_dimension_order(dataset)
        self.assertEqual((3, 3), result.value)
        self.assertEqual([], result.msgs)
    def test_check_fill_value_outside_valid_range(self):
        """
        2.5.1 The _FillValue should be outside the range specified by valid_range (if used) for a variable.
        """
        # _FillValue 1.0 sits inside valid_min/valid_max (-10, 10) -> flagged
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        result = self.cf.check_fill_value_outside_valid_range(dataset)
        assert result.msgs[0] == (
            u"salinity:_FillValue (1.0) should be outside the "
            "range specified by valid_min/valid_max (-10, 10)"
        )
        # one of two variables fails the same way in the chap2 fixture
        dataset = self.load_dataset(STATIC_FILES["chap2"])
        result = self.cf.check_fill_value_outside_valid_range(dataset)
        assert result.value == (1, 2)
        assert result.msgs[0] == (
            u"wind_speed:_FillValue (12.0) should be outside the "
            "range specified by valid_min/valid_max (0.0, 20.0)"
        )
    def test_check_conventions_are_cf_16(self):
        """
        §2.6.1 the NUG defined global attribute Conventions to the string value
        "CF-1.6"
        """
        # :Conventions = "CF-1.6"
        dataset = self.load_dataset(STATIC_FILES["rutgers"])
        result = self.cf.check_conventions_version(dataset)
        self.assertTrue(result.value)
        # :Conventions = "CF-1.6 ,ACDD" ; -- CF-1.6 among several is accepted
        dataset = self.load_dataset(STATIC_FILES["conv_multi"])
        result = self.cf.check_conventions_version(dataset)
        self.assertTrue(result.value)
        # :Conventions = "NoConvention" -- missing CF-1.6 fails with a message
        dataset = self.load_dataset(STATIC_FILES["conv_bad"])
        result = self.cf.check_conventions_version(dataset)
        self.assertFalse(result.value)
        assert result.msgs[0] == (
            u"§2.6.1 Conventions global attribute does not contain " '"CF-1.6"'
        )
    def test_check_convention_globals(self):
        """
        Load up a dataset and ensure title and history global attrs are checked
        properly (§2.6.2).
        """
        # check for pass
        dataset = self.load_dataset(STATIC_FILES["rutgers"])
        result = self.cf.check_convention_globals(dataset)
        assert result.value[0] == result.value[1]
        # check if it doesn't exist that we pass
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        result = self.cf.check_convention_globals(dataset)
        # a missing/empty title costs points and produces a message
        assert result.value[0] != result.value[1]
        assert (
            result.msgs[0]
            == u"§2.6.2 global attribute title should exist and be a non-empty string"
        )
    def test_check_convention_possibly_var_attrs(self):
        """
        §2.6.2 The units attribute is required for all variables that represent dimensional quantities
        (except for boundary variables defined in Section 7.1, "Cell Boundaries" and climatology variables
        defined in Section 7.4, "Climatological Statistics").
        Units are not required for dimensionless quantities. A variable with no units attribute is assumed
        to be dimensionless. However, a units attribute specifying a dimensionless unit may optionally be
        included.
        - units required
        - type must be recognized by udunits
        - if std name specified, must be consistent with standard name table, must also be consistent with a
          specified cell_methods attribute if present
        """
        dataset = self.load_dataset(STATIC_FILES["rutgers"])
        result = self.cf.check_convention_possibly_var_attrs(dataset)
        # 10x comment attrs
        # 1x institution
        # 1x source
        # 1x EMPTY references
        assert result.value[0] != result.value[1]
        assert (
            result.msgs[0]
            == u"§2.6.2 references global attribute should be a non-empty string"
        )
        # load bad_data_type.nc
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        result = self.cf.check_convention_possibly_var_attrs(dataset)
        # no references
        # institution is a 10L
        # no source
        # comments don't matter unless they're empty
        assert result.value[0] != result.value[1]
        assert (
            result.msgs[0]
            == u"§2.6.2 salinity:institution should be a non-empty string"
        )
    def test_check_standard_name(self):
        """
        3.3 A standard name is associated with a variable via the attribute standard_name which takes a
        string value comprised of a standard name optionally followed by one or more blanks and a
        standard name modifier
        """
        dataset = self.load_dataset(STATIC_FILES["2dim"])
        results = self.cf.check_standard_name(dataset)
        for each in results:
            self.assertTrue(each.value)
        # load failing ds
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        results = self.cf.check_standard_name(dataset)
        score, out_of, messages = get_results(results)
        # 9 vars checked, 8 fail
        assert len(results) == 9
        assert score < out_of
        assert all(r.name == u"§3.3 Standard Name" for r in results)
        # load different ds --  ll vars pass this check
        dataset = self.load_dataset(STATIC_FILES["reduced_horizontal_grid"])
        results = self.cf.check_standard_name(dataset)
        score, out_of, messages = get_results(results)
        assert score == out_of
    def test_cell_bounds(self):
        """Exercise §7.1 cell-boundary checks on passing and failing fixtures."""
        dataset = self.load_dataset(STATIC_FILES["grid-boundaries"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        assert (score, out_of) == (2, 2)
        # NOTE(review): the next two results are computed but never asserted —
        # smoke checks only? Consider adding assertions.
        dataset = self.load_dataset(STATIC_FILES["cf_example_cell_measures"])
        results = self.cf.check_cell_boundaries(dataset)
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        results = self.cf.check_cell_boundaries(dataset)
        dataset = self.load_dataset(STATIC_FILES["bounds_bad_order"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        # Make sure that the rgrid coordinate variable isn't checked for standard_name
        assert (score, out_of) == (0, 2)
        dataset = self.load_dataset(STATIC_FILES["bounds_bad_num_coords"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        assert (score, out_of) == (0, 2)
        dataset = self.load_dataset(STATIC_FILES["1d_bound_bad"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        assert (score, out_of) == (0, 2)
    def test_cell_measures(self):
        """Exercise §7.2 cell_measures attribute validation."""
        # well-formed cell_measures passes with a non-trivial score
        dataset = self.load_dataset(STATIC_FILES["cell_measure"])
        results = self.cf.check_cell_measures(dataset)
        score, out_of, messages = get_results(results)
        assert score == out_of
        assert score > 0
        # malformed attribute string is reported verbatim
        dataset = self.load_dataset(STATIC_FILES["bad_cell_measure1"])
        results = self.cf.check_cell_measures(dataset)
        score, out_of, messages = get_results(results)
        message = (
            "The cell_measures attribute for variable PS is formatted incorrectly.  "
            "It should take the form of either 'area: cell_var' or 'volume: cell_var' "
            "where cell_var is the variable describing the cell measures"
        )
        assert message in messages
        # referenced measure variable missing from the dataset is reported
        dataset = self.load_dataset(STATIC_FILES["bad_cell_measure2"])
        results = self.cf.check_cell_measures(dataset)
        score, out_of, messages = get_results(results)
        message = u"Cell measure variable box_area referred to by PS is not present in dataset variables"
        assert message in messages
    def test_climatology_cell_methods(self):
        """
        Checks that climatology cell_methods strings are properly validated
        """
        dataset = self.load_dataset(STATIC_FILES["climatology"])
        results = self.cf.check_climatological_statistics(dataset)
        # cell methods in this file is
        # "time: mean within days time: mean over days"
        score, out_of, messages = get_results(results)
        self.assertEqual(score, out_of)
        # wrap the variable so cell_methods can be mutated between checks
        temp_var = dataset.variables["temperature"] = MockVariable(
            dataset.variables["temperature"]
        )
        temp_var.cell_methods = "INVALID"
        results = self.cf.check_climatological_statistics(dataset)
        score, out_of, messages = get_results(results)
        self.assertNotEqual(score, out_of)
        # incorrect time units
        temp_var.cell_methods = "time: mean within years time: mean over days"
        results = self.cf.check_climatological_statistics(dataset)
        score, out_of, messages = get_results(results)
        self.assertNotEqual(score, out_of)
        # can only have third method over years if first two are within and
        # over days, respectively
        temp_var.cell_methods = (
            "time: mean within years time: mean over years time: sum over years"
        )
        results = self.cf.check_climatological_statistics(dataset)
        score, out_of, messages = get_results(results)
        self.assertNotEqual(score, out_of)
        # this, on the other hand, should work.
        temp_var.cell_methods = (
            "time: mean within days time: mean over days time: sum over years"
        )
        results = self.cf.check_climatological_statistics(dataset)
        score, out_of, messages = get_results(results)
        self.assertEqual(score, out_of)
        # parenthesized comment to describe climatology
        temp_var.cell_methods = (
            "time: sum within days time: maximum over days (ENSO years)"
        )
        results = self.cf.check_climatological_statistics(dataset)
        score, out_of, messages = get_results(results)
        self.assertEqual(score, out_of)
    def test_check_ancillary_variables(self):
        """
        Test to ensure that ancillary variables are properly checked
        """
        # compliant dataset: both ancillary-data points are earned
        dataset = self.load_dataset(STATIC_FILES["rutgers"])
        results = self.cf.check_ancillary_variables(dataset)
        result_dict = {result.name: result for result in results}
        result = result_dict[u"§3.4 Ancillary Data"]
        assert result.value == (2, 2)
        # dataset referencing a nonexistent ancillary variable loses a point
        # and reports the missing name
        dataset = self.load_dataset(STATIC_FILES["bad_reference"])
        results = self.cf.check_ancillary_variables(dataset)
        result_dict = {result.name: result for result in results}
        result = result_dict[u"§3.4 Ancillary Data"]
        assert result.value == (1, 2)
        assert u"temp_qc is not a variable in this dataset" == result.msgs[0]
    def test_download_standard_name_table(self):
        """
        Test that a user can download a specific standard name table
        """
        version = "35"
        data_directory = create_cached_data_dir()
        location = os.path.join(
            data_directory, "cf-standard-name-table-test-{0}.xml".format(version)
        )
        download_cf_standard_name_table(version, location)
        # Test that the file now exists in location and is the right version
        self.assertTrue(os.path.isfile(location))
        std_names = StandardNameTable(location)
        self.assertEqual(std_names._version, version)
        # remove the downloaded file when the test finishes, pass or fail
        self.addCleanup(os.remove, location)
    def test_bad_standard_name_table(self):
        """
        Test that failure in case a bad standard name table is passed.
        """
        # constructing a table from a missing file raises IOError
        # would this ever actually be reached by the code?
        with pytest.raises(IOError):
            StandardNameTable("dummy_non_existent_file.ext")
        # a standard_name_table attribute pointing at a missing file
        nc_obj = MockTimeSeries()
        nc_obj.standard_name_table = "dummy_non_existent_file.ext"
        self.assertFalse(self.cf._find_cf_standard_name_table(nc_obj))
        # a non-string standard_name_table attribute
        nc_obj.standard_name_table = np.array([], np.float64)
        self.assertFalse(self.cf._find_cf_standard_name_table(nc_obj))
        # a vocabulary string with no parseable version number should warn
        # and still fail to locate a table
        nc_obj.standard_name_vocabulary = "CF Standard Name Table vNN???"
        with pytest.warns(
            UserWarning,
            match="Cannot extract CF standard name version "
            "number from standard_name_vocabulary string",
        ):
            self.assertFalse(self.cf._find_cf_standard_name_table(nc_obj))
    def test_check_flags(self):
        """Test that the check for flags works as expected."""
        dataset = self.load_dataset(STATIC_FILES["rutgers"])
        results = self.cf.check_flags(dataset)
        scored, out_of, messages = get_results(results)
        # only 4 variables in this dataset do not have perfect scores
        # (a result fails when its earned points are below its total)
        imperfect = [r.value for r in results if r.value[0] < r.value[1]]
        assert len(imperfect) == 4
    def test_check_flag_masks(self):
        """Flag checks on the ghrsst dataset should earn a perfect score."""
        ds = self.load_dataset(STATIC_FILES["ghrsst"])
        flag_results = self.cf.check_flags(ds)
        scored, out_of, _messages = get_results(flag_results)
        # This dataset is an example of fully compliant flag usage
        assert scored == out_of
        assert scored > 0
    def test_check_bad_units(self):
        """Load a dataset with units that are expected to fail (bad_units.nc).
        There are 6 variables in this dataset, three of which should give
        an error:
            - time, with units "s" (should be <units> since <epoch>)
            - lat, with units "degrees_E" (should be degrees)
            - lev, with units "level" (deprecated)"""
        # sanity check first: a compliant dataset passes every units check
        dataset = self.load_dataset(STATIC_FILES["2dim"])
        results = self.cf.check_units(dataset)
        for result in results:
            self.assert_result_is_good(result)
        # Not sure why bad_data_type was being used, we have a dataset specifically for bad units
        # dataset = self.load_dataset(STATIC_FILES['bad_data_type'])
        dataset = self.load_dataset(STATIC_FILES["bad_units"])
        all_results = self.cf.check_units(dataset)
        # use itertools.chain() to unpack the lists of messages
        results_list = list(chain(*(r.msgs for r in all_results if r.msgs)))
        # check the results only have '§3.1 Units' as the header
        assert all(r.name == u"§3.1 Units" for r in all_results)
        # check that all the expected variables have been hit
        assert all(
            any(s in msg for msg in results_list) for s in ["time", "lat", "lev"]
        )
    def test_latitude(self):
        """
        Section 4.1 Latitude Coordinate
        """
        # Check compliance
        dataset = self.load_dataset(STATIC_FILES["example-grid"])
        results = self.cf.check_latitude(dataset)
        score, out_of, messages = get_results(results)
        assert score == out_of
        # Verify non-compliance -- 9/12 pass
        dataset = self.load_dataset(STATIC_FILES["bad"])
        results = self.cf.check_latitude(dataset)
        scored, out_of, messages = get_results(results)
        assert len(results) == 12
        assert scored < out_of
        assert len([r for r in results if r.value[0] < r.value[1]]) == 3
        # BUG FIX: the original asserted a bare generator expression, which is
        # always truthy and therefore never actually checked the names; wrap
        # it in all() to match the equivalent assertion in test_longitude.
        assert all(r.name == u"§4.1 Latitude Coordinate" for r in results)
        # check with another ds -- all 6 vars checked pass
        dataset = self.load_dataset(STATIC_FILES["rotated_pole_grid"])
        results = self.cf.check_latitude(dataset)
        scored, out_of, messages = get_results(results)
        assert len(results) == 6
        assert scored == out_of
        # BUG FIX: same always-true generator assert as above; use all().
        assert all(r.name == u"§4.1 Latitude Coordinate" for r in results)
        # hack to avoid writing to read-only file
        dataset.variables["rlat"] = MockVariable(dataset.variables["rlat"])
        rlat = dataset.variables["rlat"]
        rlat.name = "rlat"
        # test with a bad value
        rlat.units = "degrees_north"
        results = self.cf.check_latitude(dataset)
        scored, out_of, messages = get_results(results)
        wrong_format = u"Grid latitude variable '{}' should use degree equivalent units without east or north components. Current units are {}"
        self.assertTrue(wrong_format.format(rlat.name, rlat.units) in messages)
        rlat.units = "radians"
        results = self.cf.check_latitude(dataset)
        scored, out_of, messages = get_results(results)
        self.assertTrue(wrong_format.format(rlat.name, rlat.units) in messages)
    def test_longitude(self):
        """
        Section 4.2 Longitude Coordinate
        """
        # Check compliance
        dataset = self.load_dataset(STATIC_FILES["example-grid"])
        results = self.cf.check_longitude(dataset)
        score, out_of, messages = get_results(results)
        assert score == out_of
        # Verify non-compliance -- 12 checked, 3 fail
        dataset = self.load_dataset(STATIC_FILES["bad"])
        results = self.cf.check_longitude(dataset)
        scored, out_of, messages = get_results(results)
        assert len(results) == 12
        assert scored < out_of
        assert len([r for r in results if r.value[0] < r.value[1]]) == 3
        assert all(r.name == u"§4.2 Longitude Coordinate" for r in results)
        # check different dataset # TODO can be improved for check_latitude too
        # NOTE(review): this calls check_latitude inside the longitude test --
        # looks like a copy-paste leftover; confirm whether check_longitude
        # was intended here.
        dataset = self.load_dataset(STATIC_FILES["rotated_pole_grid"])
        results = self.cf.check_latitude(dataset)
        scored, out_of, messages = get_results(results)
        assert (scored, out_of) == (6, 6)
        # hack to avoid writing to read-only file
        dataset.variables["rlon"] = MockVariable(dataset.variables["rlon"])
        rlon = dataset.variables["rlon"]
        rlon.name = "rlon"
        # test with a bad value
        rlon.units = "degrees_east"
        results = self.cf.check_longitude(dataset)
        scored, out_of, messages = get_results(results)
        wrong_format = u"Grid longitude variable '{}' should use degree equivalent units without east or north components. Current units are {}"
        self.assertTrue(wrong_format.format(rlon.name, rlon.units) in messages)
        rlon.units = "radians"
        results = self.cf.check_longitude(dataset)
        scored, out_of, messages = get_results(results)
        self.assertTrue(wrong_format.format(rlon.name, rlon.units) in messages)
    def test_is_vertical_coordinate(self):
        """
        Section 4.3 Qualifiers for Vertical Coordinate
        NOTE: The standard doesn't explicitly say that vertical coordinates must be a
        coordinate type.
        """
        # recognized via standard_name
        by_standard_name = MockVariable()
        by_standard_name.standard_name = "depth"
        self.assertTrue(is_vertical_coordinate("not_known", by_standard_name))
        # recognized via axis attribute of "Z"
        by_axis = MockVariable()
        by_axis.axis = "Z"
        self.assertTrue(is_vertical_coordinate("not_known", by_axis))
        # recognized via pressure units alone
        by_units = MockVariable()
        by_units.units = "dbar"
        self.assertTrue(is_vertical_coordinate("not_known", by_units))
        # recognized via length units combined with a positive attribute
        by_positive = MockVariable()
        by_positive.units = "m"
        by_positive.positive = "up"
        self.assertTrue(is_vertical_coordinate("not_known", by_positive))
    def test_vertical_dimension(self):
        """
        Section 4.3.1 Dimensional Vertical Coordinate
        """
        section = u"§4.3 Vertical Coordinate"
        # compliant dataset: exactly one result, filed under the section
        ds = self.load_dataset(STATIC_FILES["example-grid"])
        vert_results = self.cf.check_dimensional_vertical_coordinate(ds)
        assert len(vert_results) == 1
        assert all(res.name == section for res in vert_results)
        # non-compliant dataset: still one result, but it loses points
        ds = self.load_dataset(STATIC_FILES["illegal-vertical"])
        vert_results = self.cf.check_dimensional_vertical_coordinate(ds)
        scored, out_of, _messages = get_results(vert_results)
        assert len(vert_results) == 1
        assert all(res.name == section for res in vert_results)
        assert scored < out_of
    def test_appendix_d(self):
        """
        CF 1.6, Appendix D.
        Each dimensionless vertical coordinate definition associates terms
        with file variables through the formula_terms attribute, a
        blank-separated list of "term: variable" elements whose order is not
        significant. For every coordinate listed in Appendix D, verify that
        no_missing_terms() accepts exactly the documented term set and rejects
        invalid, missing, or excess term sets.
        """
        # (standard_name, exact term set) pairs that must be accepted
        accepted_cases = [
            ("atmosphere_ln_pressure_coordinate", {"p0", "lev"}),
            ("atmosphere_sigma_coordinate", {"sigma", "ps", "ptop"}),
            ("atmosphere_hybrid_sigma_pressure_coordinate", {"a", "b", "ps"}),
            # alternative terms for
            # 'atmosphere_hybrid_sigma_pressure_coordinate'
            ("atmosphere_hybrid_sigma_pressure_coordinate", {"ap", "b", "ps"}),
            ("atmosphere_hybrid_height_coordinate", {"a", "b", "orog"}),
            (
                "atmosphere_sleve_coordinate",
                {"a", "b1", "b2", "ztop", "zsurf1", "zsurf2"},
            ),
            ("ocean_sigma_coordinate", {"sigma", "eta", "depth"}),
            ("ocean_s_coordinate", {"s", "eta", "depth", "a", "b", "depth_c"}),
            (
                "ocean_sigma_z_coordinate",
                {"sigma", "eta", "depth", "depth_c", "nsigma", "zlev"},
            ),
            (
                "ocean_double_sigma_coordinate",
                {"sigma", "depth", "z1", "z2", "a", "href", "k_c"},
            ),
        ]
        # term sets that must be rejected: an invalid term name, a missing
        # term, and an excess term, respectively
        rejected_cases = [
            ("atmosphere_hybrid_sigma_pressure_coordinate", {"a", "b", "p"}),
            ("atmosphere_hybrid_height_coordinate", {"a", "b"}),
            ("atmosphere_hybrid_height_coordinate", {"a", "b", "c", "orog"}),
        ]
        for standard_name, terms in accepted_cases:
            self.assertTrue(
                no_missing_terms(
                    standard_name, terms, dimless_vertical_coordinates_1_6
                )
            )
        for standard_name, terms in rejected_cases:
            self.assertFalse(
                no_missing_terms(
                    standard_name, terms, dimless_vertical_coordinates_1_6
                )
            )
    def test_dimensionless_vertical(self):
        """
        Section 4.3.2
        """
        # Check affirmative compliance
        dataset = self.load_dataset(STATIC_FILES["dimensionless"])
        results = self.cf.check_dimensionless_vertical_coordinates(dataset)
        scored, out_of, messages = get_results(results)
        # all variables checked (2) pass
        assert len(results) == 2
        assert scored == out_of
        assert all(r.name == u"§4.3 Vertical Coordinate" for r in results)
        # Check negative compliance -- 2 of the 4 results fail
        dataset = self.load_dataset(STATIC_FILES["bad"])
        results = self.cf.check_dimensionless_vertical_coordinates(dataset)
        scored, out_of, messages = get_results(results)
        assert len(results) == 4
        # BUG FIX: `scored <= out_of` is trivially true for any result set;
        # since two results fail (asserted just below), the strict inequality
        # must hold and actually verifies non-compliance.
        assert scored < out_of
        assert len([r for r in results if r.value[0] < r.value[1]]) == 2
        assert all(r.name == u"§4.3 Vertical Coordinate" for r in results)
        # test with an invalid formula_terms
        dataset.variables["lev2"] = MockVariable(dataset.variables["lev2"])
        lev2 = dataset.variables["lev2"]
        lev2.formula_terms = "a: var1 b:var2 orog:"
        # create a malformed formula_terms attribute and check that it fails
        # 2/4 still pass
        results = self.cf.check_dimensionless_vertical_coordinates(dataset)
        scored, out_of, messages = get_results(results)
        assert len(results) == 4
        # BUG FIX: same trivially-true `<=` tightened to `<` (see above).
        assert scored < out_of
        assert len([r for r in results if r.value[0] < r.value[1]]) == 2
        assert all(r.name == u"§4.3 Vertical Coordinate" for r in results)
    def test_is_time_variable(self):
        """Exercise each way a variable can be identified as time."""
        # identified via standard_name
        named_std = MockVariable()
        named_std.standard_name = "time"
        self.assertTrue(is_time_variable("not_time", named_std))
        # identified purely by the variable name "time"
        plain = MockVariable()
        self.assertTrue(is_time_variable("time", plain))
        self.assertFalse(is_time_variable("not_time", plain))
        # identified via the axis attribute
        axis_t = MockVariable()
        axis_t.axis = "T"
        self.assertTrue(is_time_variable("maybe_time", axis_t))
        # identified via time-style units
        with_units = MockVariable()
        with_units.units = "seconds since 1900-01-01"
        self.assertTrue(is_time_variable("maybe_time", with_units))
    def test_dimensionless_standard_names(self):
        """Check that dimensionless standard names are properly detected"""
        root = self.cf._std_names._root
        # (standard_name, expected) pairs: canonical units of K are
        # dimensional; canonical units of 1 and 1e-3 count as dimensionless
        cases = [
            ("sea_water_temperature", False),
            ("sea_water_practical_salinity", True),
            ("sea_water_salinity", True),
        ]
        for standard_name, expected in cases:
            detected = cfutil.is_dimensionless_standard_name(root, standard_name)
            if expected:
                self.assertTrue(detected)
            else:
                self.assertFalse(detected)
    def test_check_time_coordinate(self):
        """Validate the time coordinate check on good and bad datasets."""
        # compliant dataset: every result should be truthy
        dataset = self.load_dataset(STATIC_FILES["example-grid"])
        results = self.cf.check_time_coordinate(dataset)
        for r in results:
            self.assertTrue(r.value)
        # the "bad" dataset's time variable has invalid time units
        dataset = self.load_dataset(STATIC_FILES["bad"])
        results = self.cf.check_time_coordinate(dataset)
        scored, out_of, messages = get_results(results)
        assert u"time does not have correct time units" in messages
        assert (scored, out_of) == (1, 2)
    def test_check_calendar(self):
        """Load a dataset with an invalid calendar attribute (non-comp/bad.nc).
        This dataset has a variable, "time" with  calendar attribute "nope"."""
        # compliant dataset: every calendar result should be truthy
        dataset = self.load_dataset(STATIC_FILES["example-grid"])
        results = self.cf.check_calendar(dataset)
        for r in results:
            self.assertTrue(r.value)
        dataset = self.load_dataset(STATIC_FILES["bad"])
        results = self.cf.check_calendar(dataset)
        scored, out_of, messages = get_results(results)
        # the invalid calendar value must be reported verbatim
        assert (
            u"§4.4.1 Variable time should have a valid calendar: 'nope' is not a valid calendar"
            in messages
        )
    def test_check_aux_coordinates(self):
        """Auxiliary coordinate checks on this dataset should pass cleanly."""
        ds = self.load_dataset(STATIC_FILES["illegal-aux-coords"])
        by_name = {res.name: res for res in self.cf.check_aux_coordinates(ds)}
        coord_result = by_name[u"§5 Coordinate Systems"]
        # a perfect score with no failure messages
        assert coord_result.msgs == []
        assert coord_result.value == (4, 4)
    def test_check_grid_coordinates(self):
        """Grid coordinate checks on the 2dim dataset should all pass."""
        ds = self.load_dataset(STATIC_FILES["2dim"])
        grid_results = self.cf.check_grid_coordinates(ds)
        scored, out_of, _messages = get_results(grid_results)
        section = (
            u"§5.6 Horizontal Coordinate Reference Systems, Grid Mappings, Projections"
        )
        by_name = {res.name: res for res in grid_results}
        # both the named result and the aggregate score are perfect
        assert by_name[section].value == (2, 2)
        assert (scored, out_of) == (2, 2)
    def test_check_two_dimensional(self):
        """Validate grid coordinate checks on two-dimensional coordinates."""
        # compliant dataset: every result should be truthy
        dataset = self.load_dataset(STATIC_FILES["2dim"])
        results = self.cf.check_grid_coordinates(dataset)
        for r in results:
            self.assertTrue(r.value)
        # Need the bad testing
        dataset = self.load_dataset(STATIC_FILES["bad2dim"])
        results = self.cf.check_grid_coordinates(dataset)
        scored, out_of, messages = get_results(results)
        # all variables checked fail (2)
        assert len(results) == 2
        assert scored < out_of
        assert all(
            r.name
            == u"§5.6 Horizontal Coordinate Reference Systems, Grid Mappings, Projections"
            for r in results
        )
    def test_check_reduced_horizontal_grid(self):
        """Validate reduced horizontal grid checks on good and bad datasets."""
        # compliant dataset: single result, perfect score
        dataset = self.load_dataset(STATIC_FILES["rhgrid"])
        results = self.cf.check_reduced_horizontal_grid(dataset)
        scored, out_of, messages = get_results(results)
        assert scored == out_of
        assert len(results) == 1
        assert all(r.name == u"§5.3 Reduced Horizontal Grid" for r in results)
        # load failing ds -- one variable has failing check
        dataset = self.load_dataset(STATIC_FILES["bad-rhgrid"])
        results = self.cf.check_reduced_horizontal_grid(dataset)
        scored, out_of, messages = get_results(results)
        assert scored != out_of
        assert len(results) == 2
        assert len([r for r in results if r.value[0] < r.value[1]]) == 1
        assert all(r.name == u"§5.3 Reduced Horizontal Grid" for r in results)
    def test_check_grid_mapping(self):
        """All six grid-mapping results for the mapping dataset should pass."""
        dataset = self.load_dataset(STATIC_FILES["mapping"])
        # note: check_grid_mapping returns a mapping of results here
        # (the .values() calls below rely on that)
        results = self.cf.check_grid_mapping(dataset)
        assert len(results) == 6
        # no result may score below its maximum
        assert len([r.value for r in results.values() if r.value[0] < r.value[1]]) == 0
        expected_name = (
            "§5.6 Horizontal Coordinate Reference Systems, Grid Mappings, Projections"
        )
        assert all(r.name == expected_name for r in results.values())
    def test_is_geophysical(self):
        """String-typed variables without cf_role must not crash the check."""
        # check whether string type variable, which are not `cf_role`, are
        # properly processed
        dataset = self.load_dataset(STATIC_FILES["string"])
        # string-typed variables only exist in netCDF4-format files, so the
        # fixture must be netCDF4 for this test to be meaningful
        if dataset.file_format != "NETCDF4":
            raise RuntimeError(
                "netCDF file of wrong format (not netCDF4) was created for checking"
            )
        try:
            result = cfutil.is_geophysical(dataset, "j")
        except AttributeError:
            pytest.fail(
                "Test probably fails because var.dtype.kind or var.dtype.char "
                "was tested on string-type variable. Consider checking for "
                "`var.dtype is str`"
            )
        # the string variable "j" must not be classified as geophysical
        assert not result
        # assert False
    # TODO: overhaul to use netCDF global attributes or mocks and variable
    #       attributes
    def test_check_attr_type(self):
        """
        Check that the check_attr_type method checks
        grid_mapping attribute types correctly.
        """
        # test good: a numeric attribute against the "N" (numeric) expectation
        att_name = "test_att"
        att = np.int64(45)
        att_type = "N"  # numeric
        res = self.cf._check_attr_type(att_name, att_type, att)
        self.assertTrue(res[0])
        self.assertEqual(res[1], None)
        # create a temporary variable and test this only
        nc_obj = MockTimeSeries()
        nc_obj.createVariable("temperature", "d", ("time",))
        nc_obj.variables["temperature"].setncattr("test_att", np.float64(45))
        att_name = "test_att"
        _var = nc_obj.variables["temperature"]
        att = np.float64(45)
        att_type = "D"  # numeric, types should match
        res = self.cf._check_attr_type(att_name, att_type, att, _var)
        self.assertTrue(res[0])
        self.assertEqual(res[1], None)
        # a string attribute against the "S" (string) expectation
        att_name = "test_att"
        att = "yo"
        att_type = "S"  # string
        res = self.cf._check_attr_type(att_name, att_type, att)
        self.assertTrue(res[0])
        self.assertEqual(res[1], None)
        # test bad
        att_name = "test_att"
        att = np.int64(45)
        att_type = "S"  # string, but att type is numeric
        res = self.cf._check_attr_type(att_name, att_type, att)
        self.assertFalse(res[0])
        self.assertEqual(res[1], "test_att must be a string")
        # test bad
        att_name = "test_att"
        att = "bad"
        att_type = "N"  # numeric, but att type is string
        res = self.cf._check_attr_type(att_name, att_type, att)
        self.assertFalse(res[0])
        self.assertEqual(res[1], "test_att must be a numeric type")
        # create a temporary variable and test this only
        nc_obj = MockTimeSeries()
        nc_obj.createVariable("temperature", "d", ("time",))
        nc_obj.variables["temperature"].setncattr("test_att", np.int32(45))
        _var = nc_obj.variables["temperature"]
        att_name = "test_att"
        att = np.int32(2)
        # the variable was created as float64 ("d") while the attribute is
        # int32, so the "same datatype" requirement must fail
        att_type = "D"  # should be same datatypes
        res = self.cf._check_attr_type(att_name, att_type, att, _var)
        self.assertFalse(res[0])
        self.assertEqual(
            res[1], "test_att must be numeric and must be equivalent to float64 dtype"
        )
    def test_check_grid_mapping_attr_condition(self):
        """
        Ensure the check_grid_mapping_attr_condition() method works as expected.
        """
        # each grid-mapping attribute paired with a value that satisfies its
        # condition; every case must pass
        passing_cases = [
            ("latitude_of_projection_origin", 0),
            ("longitude_of_projection_origin", 0),
            ("longitude_of_prime_meridian", 0),
            ("scale_factor_at_central_meridian", 1),
            ("scale_factor_at_projection_origin", 1),
            ("standard_parallel", 0),
            ("straight_vertical_longitude_from_pole", 0),
        ]
        for attr_name, val in passing_cases:
            res = self.cf._check_grid_mapping_attr_condition(val, attr_name)
            self.assertTrue(res[0])
    def test_check_geographic_region(self):
        """Invalid region names must be flagged by the geographic region check."""
        dataset = self.load_dataset(STATIC_FILES["bad_region"])
        results = self.cf.check_geographic_region(dataset)
        scored, out_of, messages = get_results(results)
        # only one variable failed this check in this ds out of 2
        assert len(results) == 2
        assert scored < out_of
        # the invalid region name must be reported verbatim
        assert (
            u"6.1.1 'Neverland' specified by 'neverland' is not a valid region"
            in messages
        )
    def test_check_packed_data(self):
        """valid_min/valid_max type mismatches must be reported per variable."""
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        results = self.cf.check_packed_data(dataset)
        score, out_of, messages = get_results(results)
        # NOTE(review): two of these expected strings omit the ':' separator
        # ("tempvalid_min", "salinityvalid_min") -- confirm this matches the
        # checker's actual message format rather than being a typo here.
        msgs = [
            u"Type of tempvalid_min attribute (int32) does not match variable type (int64)",
            u"Type of temp:valid_max attribute (int32) does not match variable type (int64)",
            u"Type of salinityvalid_min attribute (int32) does not match variable type (float64)",
            u"Type of salinity:valid_max attribute (int32) does not match variable type (float64)",
        ]
        self.assertEqual(len(results), 4)
        self.assertTrue(score < out_of)
        self.assertTrue(all(m in messages for m in msgs))
    def test_compress_packed(self):
        """Tests compressed indexed coordinates"""
        # compliant compression-by-gathering dataset passes
        dataset = self.load_dataset(STATIC_FILES["reduced_horizontal_grid"])
        results = self.cf.check_compression_gathering(dataset)
        self.assertTrue(results[0].value)
        # both compression results fail for the bad dataset
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        results = self.cf.check_compression_gathering(dataset)
        self.assertFalse(results[0].value)
        self.assertFalse(results[1].value)
    #def test_check_all_features_are_same_type(self):
    #    dataset = self.load_dataset(STATIC_FILES["rutgers"])
    #    result = self.cf.check_all_features_are_same_type(dataset)
    #    assert result
    #    dataset = self.load_dataset(STATIC_FILES["featureType"])
    #    result = self.cf.check_all_features_are_same_type(dataset)
    #    assert result
    def test_featureType_is_case_insensitive(self):
        """
        Tests that the featureType attribute is case insensitive
        """
        nc = self.new_nc_file()
        # every case variant of a valid feature type should earn full credit
        for feature_type in (
            "timeseriesprofile",
            "timeSeriesProfile",
            "traJectorYpRofile",
        ):
            nc.featureType = feature_type
            result = self.cf.check_feature_type(nc)
            self.assertTrue(result.value == (1, 1))
        # an unrecognized feature type must fail regardless of case
        nc.featureType = "timeseriesprofilebad"
        result = self.cf.check_feature_type(nc)
        self.assertTrue(result.value == (0, 1))
    def test_check_units(self):
        """
        Ensure that container variables are not checked for units but geophysical variables are
        """
        dataset = self.load_dataset(STATIC_FILES["units_check"])
        results = self.cf.check_units(dataset)
        # We don't keep track of the variables names for checks that passed, so
        # we can make a strict assertion about how many checks were performed
        # and if there were errors, which there shouldn't be.
        # FIXME (badams): find a better way of grouping together results by
        #                 variable checked instead of checking the number of
        #                 points scored, which should be deprecated, and
        #                 furthermore is fragile and breaks tests when check
        #                 definitions change
        scored, out_of, messages = get_results(results)
        # exactly 24 points available, all earned, with no failure messages
        assert scored == 24
        assert out_of == 24
        assert messages == []
    def test_check_duplicates(self):
        """
        Test to verify that the check identifies duplicate axes. Load the
        duplicate_axis.nc dataset and verify the duplicate axes are accounted
        for.
        """
        ds = self.load_dataset(STATIC_FILES["duplicate_axis"])
        dup_results = self.cf.check_duplicate_axis(ds)
        scored, out_of, messages = get_results(dup_results)
        # a single check runs, and it must lose points for the duplicate axis
        assert scored != out_of
        expected = u"'temp' has duplicate axis X defined by [lon_rho, lon_u]"
        assert messages[0] == expected
    def test_check_multi_dimensional_coords(self):
        """
        Test to verify that multi dimensional coordinates are checked for
        sharing names with dimensions
        """
        dataset = self.load_dataset(STATIC_FILES["multi-dim-coordinates"])
        results = self.cf.check_multi_dimensional_coords(dataset)
        scored, out_of, messages = get_results(results)
        # 4 variables were checked in this ds, 2 of which passed
        assert len(results) == 4
        failing = [res for res in results if res.value[0] < res.value[1]]
        assert len(failing) == 2
        for res in results:
            assert res.name == u"§5 Coordinate Systems"
    def test_64bit(self):
        """Smoke test: a dataset containing 64-bit integers runs through the CF suite."""
        dataset = self.load_dataset(STATIC_FILES["ints64"])
        check_suite = CheckSuite()
        check_suite.checkers = {"cf": CF1_6Check}
        check_suite.run(dataset, "cf")
    def test_variable_feature_check(self):
        """Exercise check_variable_features on bad, complete, and implied trajectory files."""
        # non-compliant dataset -- 1/1 fail
        bad_ds = self.load_dataset(STATIC_FILES["bad-trajectory"])
        results = self.cf.check_variable_features(bad_ds)
        scored, out_of, messages = get_results(results)
        assert len(results) == 2
        assert scored < out_of
        failing = [res for res in results if res.value[0] < res.value[1]]
        assert len(failing) == 1
        for res in results:
            assert res.name == u"§9.1 Features and feature types"
        # compliant dataset
        complete_ds = self.load_dataset(STATIC_FILES["trajectory-complete"])
        scored, out_of, messages = get_results(
            self.cf.check_variable_features(complete_ds)
        )
        assert scored == out_of
        # compliant(?) dataset
        implied_ds = self.load_dataset(STATIC_FILES["trajectory-implied"])
        scored, out_of, messages = get_results(
            self.cf.check_variable_features(implied_ds)
        )
        assert scored == out_of
    def test_check_cell_methods(self):
        """Load a dataset (climatology.nc) and check the cell methods.
        This dataset has variable "temperature" which has valid cell_methods
        format, cell_methods attribute, and valid names within the
        cell_methods attribute."""
        dataset = self.load_dataset(STATIC_FILES["climatology"])
        results = self.cf.check_cell_methods(dataset)
        scored, out_of, messages = get_results(results)
        # use itertools.chain() to unpack the lists of messages
        results_list = list(chain(*(r.msgs for r in results if r.msgs)))
        # check the results only have expected headers
        assert set([r.name for r in results]).issubset(
            set([u"§7.1 Cell Boundaries", u"§7.3 Cell Methods"])
        )
        # check that all the expected variables have been hit
        assert all("temperature" in msg for msg in results_list)
        # check that all the results have come back passing
        assert all(r.value[0] == r.value[1] for r in results)
        # create a temporary variable and test this only
        nc_obj = MockTimeSeries()
        nc_obj.createVariable("temperature", "d", ("time",))
        temp = nc_obj.variables["temperature"]
        temp.cell_methods = "lat: lon: mean depth: mean (interval: 20 meters)"
        results = self.cf.check_cell_methods(nc_obj)
        # invalid components lat, lon, and depth -- expect score == (6, 9)
        scored, out_of, messages = get_results(results)
        assert scored != out_of
        # non-numeric interval value ("x whizbangs")
        temp.cell_methods = "lat: lon: mean depth: mean (interval: x whizbangs)"
        results = self.cf.check_cell_methods(nc_obj)
        scored, out_of, messages = get_results(results)
        # NOTE(review): the results of the "x whizbangs" case above are never
        # asserted on -- presumably `assert scored != out_of` was intended
        # here; confirm against the checker before adding it.
        # check non-standard comments are gauged correctly
        temp.cell_methods = (
            "lat: lon: mean depth: mean (comment: should not go here interval: 2.5 m)"
        )
        results = self.cf.check_cell_methods(nc_obj)
        scored, out_of, messages = get_results(results)
        self.assertTrue(
            u'§7.3.3 The non-standard "comment:" element must come after any standard elements in cell_methods for variable temperature'
            in messages
        )
        # standalone comments require no keyword
        temp.cell_methods = "lon: mean (This is a standalone comment)"
        results = self.cf.check_cell_methods(nc_obj)
        scored, out_of, messages = get_results(results)
        assert "standalone" not in messages
        # check that invalid keywords dealt with
        temp.cell_methods = (
            "lat: lon: mean depth: mean (invalid_keyword: this is invalid)"
        )
        results = self.cf.check_cell_methods(nc_obj)
        scored, out_of, messages = get_results(results)
        self.assertTrue(
            u'§7.3.3 Invalid cell_methods keyword "invalid_keyword:" for variable temperature. Must be one of [interval, comment]'
            in messages
        )
        # check that "parenthetical elements" are well-formed (they should not be)
        temp.cell_methods = (
            "lat: lon: mean depth: mean (interval 0.2 m interval: 0.01 degrees)"
        )
        results = self.cf.check_cell_methods(nc_obj)
        scored, out_of, messages = get_results(results)
        assert (
            u"§7.3.3 Parenthetical content inside temperature:cell_methods is not well formed: interval 0.2 m interval: 0.01 degrees"
            in messages
        )
    # --------------------------------------------------------------------------------
    # Utility Method Tests
    # --------------------------------------------------------------------------------
    def test_temporal_unit_conversion(self):
        """Plain durations interconvert; units anchored to an epoch do not."""
        convertible = units_convertible("hours", "seconds")
        self.assertTrue(convertible)
        not_convertible = units_convertible("hours", "hours since 2000-01-01")
        self.assertFalse(not_convertible)
    def test_units_temporal(self):
        """Only well-formed '<unit> since <date>' strings count as temporal units."""
        temporal = units_temporal("hours since 2000-01-01")
        self.assertTrue(temporal)
        # a bare duration has no reference date
        self.assertFalse(units_temporal("hours"))
        # a nonsense reference date is rejected too
        self.assertFalse(units_temporal("days since the big bang"))
class TestCF1_7(BaseTestCase):
    """Extends the CF 1.6 tests. Most of the tests remain the same."""
    def setUp(self):
        """Initialize a fresh CF1_7Check object before each test in this class."""
        # A new checker per test keeps any state accumulated by checks isolated.
        self.cf = CF1_7Check()
    def test_check_actual_range(self):
        """Test the check_actual_range method works as expected"""
        # using a with block closes the ds; for checks operating on the data, we need
        # to initialize and then manually close
        dataset = MockTimeSeries()
        dataset.createVariable("a", "d", ("time",))  # dtype=double, dims=time
        # test that if the variable doesn't have an actual_range attr, no score
        result = self.cf.check_actual_range(dataset)
        assert result == []
        dataset.close()
        # NOTE this is a data check
        # if variable values are equal, actual_range should not exist
        dataset = MockTimeSeries()
        dataset.createVariable("a", "d", ("time",))  # dtype=double, dims=time
        dataset.variables["a"][0:500] = 0  # set all 500 vals to 0
        dataset.variables["a"].setncattr("actual_range", [1])
        result = self.cf.check_actual_range(dataset)
        score, out_of, messages = get_results(result)
        assert score < out_of
        assert len(messages) == 1
        assert messages[0] == u"actual_range of 'a' must be 2 elements"
        dataset.close()
        # same one-element actual_range failure, but with non-uniform data
        # values. NOTE(review): nearly identical to the case above -- only the
        # variable data differs; confirm both cases are intentional.
        dataset = MockTimeSeries()
        dataset.createVariable("a", "d", ("time",))  # dtype=double, dims=time
        dataset.variables["a"][0] = 0  # set some arbitrary val so not all equal
        dataset.variables["a"].setncattr("actual_range", [1])
        result = self.cf.check_actual_range(dataset)
        score, out_of, messages = get_results(result)
        assert score < out_of
        assert len(messages) == 1
        assert messages[0] == "actual_range of 'a' must be 2 elements"
        dataset.close()
        # NOTE this is a data check
        # check equality to min and max values
        dataset = MockTimeSeries()
        dataset.createVariable("a", "d", ("time",))
        dataset.variables["a"][0] = -299  # set some arbitrary minimum
        dataset.variables["a"][1] = 10e36  # set some arbitrary max > _FillValue default
        dataset.variables["a"].setncattr("actual_range", [0, 0])  # should fail
        result = self.cf.check_actual_range(dataset)
        score, out_of, messages = get_results(result)
        assert score < out_of
        assert len(messages) == 1
        assert (
            messages[0]
            == "actual_range elements of 'a' inconsistent with its min/max values"
        )
        dataset.close()
        # check equality to valid_range attr
        dataset = MockTimeSeries()
        dataset.createVariable("a", "d", ("time",))
        dataset.variables["a"][0] = -299  # set some arbitrary val to not all equal
        dataset.variables["a"][1] = 10e36  # set some arbitrary max > _FillValue default
        dataset.variables["a"].setncattr("valid_range", [1, 3])  # should conflict
        dataset.variables["a"].setncattr("actual_range", [-299, 10e36])
        result = self.cf.check_actual_range(dataset)
        score, out_of, messages = get_results(result)
        assert score < out_of
        assert len(messages) == 1
        assert messages[0] == '"a"\'s actual_range must be within valid_range'
        dataset.close()
        # check equality to valid_min and valid_max values
        dataset = MockTimeSeries()
        dataset.createVariable("a", "d", ("time",))
        dataset.variables["a"][0] = -299  # set some arbitrary minimum
        dataset.variables["a"][1] = 10e36  # set some arbitrary max > _FillValue default
        dataset.variables["a"].setncattr("valid_min", 42)  # conflicting valid_min/max
        dataset.variables["a"].setncattr("valid_max", 45)
        dataset.variables["a"].setncattr("actual_range", [-299, 10e36])
        result = self.cf.check_actual_range(dataset)
        score, out_of, messages = get_results(result)
        assert score < out_of
        # one message per violated bound (valid_min and valid_max)
        assert len(messages) == 2
        assert (
            messages[0] == '"a"\'s actual_range first element must be >= valid_min (42)'
        )
        assert (
            messages[1]
            == '"a"\'s actual_range second element must be <= valid_max (45)'
        )
        dataset.close()
    def test_check_cell_boundaries(self):
        """Check our over-ridden check_cell_boundaries method behaves as expected"""
        # well-formed bounds: both checks pass
        dataset = self.load_dataset(STATIC_FILES["grid-boundaries"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        assert (score, out_of) == (2, 2)
        # NOTE(review): the results for the next two datasets are computed but
        # never asserted on -- presumably score assertions were intended here.
        dataset = self.load_dataset(STATIC_FILES["cf_example_cell_measures"])
        results = self.cf.check_cell_boundaries(dataset)
        dataset = self.load_dataset(STATIC_FILES["bad_data_type"])
        results = self.cf.check_cell_boundaries(dataset)
        # bounds_bad_order: expect both bounds checks to fail
        dataset = self.load_dataset(STATIC_FILES["bounds_bad_order"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        assert (score, out_of) == (0, 2)
        # wrong number of coordinates on the bounds variable
        dataset = self.load_dataset(STATIC_FILES["bounds_bad_num_coords"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        assert (score, out_of) == (0, 2)
        # malformed 1-D bounds variable
        dataset = self.load_dataset(STATIC_FILES["1d_bound_bad"])
        results = self.cf.check_cell_boundaries(dataset)
        score, out_of, messages = get_results(results)
        assert (score, out_of) == (0, 2)
        # if the variable has formula_terms, the bounds var must also
        with MockTimeSeries() as dataset:
            dataset.createVariable("a", "d", ("time",))
            dataset.createVariable("b", "d", ("time",))
            dataset.variables["a"].setncattr("bounds", "b")  # set bounds variable
            dataset.variables["a"].setncattr("formula_terms", "test")
            results = self.cf.check_cell_boundaries(dataset)
            score, out_of, messages = get_results(results)
            assert score < out_of
            assert (
                "'a' has 'formula_terms' attr, bounds variable 'b' must also have 'formula_terms'"
                in messages
            )
    def test_cell_measures(self):
        """Over-ride the test_cell_measures from CF1_6"""
        # create a temporary variable and test this only
        with MockTimeSeries() as dataset:
            dataset.createVariable("PS", "d", ("time",))  # dtype=double, dims=time
            dataset.variables["PS"].setncattr("cell_measures", "area: cell_area")
            # ensure the cell_measures var is in the dataset
            dataset.createVariable("cell_area", "d", ("time",))
            dataset.variables["cell_area"].setncattr("units", "m2")
            # run the check
            results = self.cf.check_cell_measures(dataset)
            score, out_of, messages = get_results(results)
            # at least one point scored and a perfect score overall
            assert (score == out_of) and (score > 0)
        # same thing, but test that the cell_area variable is in
        # the global attr "external_variables"
        with MockTimeSeries() as dataset:
            dataset.createVariable("PS", "d", ("time",))  # dtype=double, dims=time
            dataset.variables["PS"].setncattr("cell_measures", "area: cell_area")
            dataset.setncattr("external_variables", ["cell_area"])
            # run the check
            results = self.cf.check_cell_measures(dataset)
            score, out_of, messages = get_results(results)
            assert score > 0
            assert score == out_of
        # now test a dataset with a poorly formatted cell_measure attr
        dataset = self.load_dataset(STATIC_FILES["bad_cell_measure1"])
        results = self.cf.check_cell_measures(dataset)
        score, out_of, messages = get_results(results)
        # exact error text expected for a malformed cell_measures attribute
        message = (
            "The cell_measures attribute for variable PS is formatted incorrectly.  "
            "It should take the form of either 'area: cell_var' or 'volume: cell_var' "
            "where cell_var is the variable describing the cell measures"
        )
        assert message in messages
        # test a dataset where the cell_measure attr is not in the dataset or external_variables
        # check for the variable should fail
        dataset = self.load_dataset(STATIC_FILES["bad_cell_measure2"])
        results = self.cf.check_cell_measures(dataset)
        score, out_of, messages = get_results(results)
        message = u"Cell measure variable box_area referred to by PS is not present in dataset variables"
        assert message in messages
    def test_process_vdatum(self):
        """
        Verify _process_v_datum_str resolves vertical datum names against a
        proj.db-style SQLite schema: via the alias_name table ("NAVD88"), via
        the vertical_datum table directly ("Ordnance Datum Newlyn"), and
        rejects a name present in neither ("NAD83").
        """
        # first, we set up a mock SQLite database
        conn_str = ":memory:"
        conn = sqlite3.connect(conn_str)
        # close the connection even if an assertion fails (the original code
        # leaked the open in-memory connection)
        try:
            cur = conn.cursor()
            # create alias and vertical datum tables without
            # triggers
            cur.execute(
                """
            CREATE TABLE alias_name(
                table_name TEXT NOT NULL CHECK (table_name IN (
                        'unit_of_measure', 'celestial_body', 'ellipsoid',
                        'area', 'prime_meridian', 'geodetic_datum', 'vertical_datum', 'geodetic_crs',
                        'projected_crs', 'vertical_crs', 'compound_crs', 'conversion', 'grid_transformation',
                        'helmert_transformation', 'other_transformation', 'concatenated_operation')),
                auth_name TEXT NOT NULL CHECK (length(auth_name) >= 1),
                code TEXT NOT NULL CHECK (length(code) >= 1),
                alt_name TEXT NOT NULL CHECK (length(alt_name) >= 2),
                source TEXT
            );
            """
            )
            cur.execute(
                """
            CREATE TABLE vertical_datum (
                auth_name TEXT NOT NULL CHECK (length(auth_name) >= 1),
                code TEXT NOT NULL CHECK (length(code) >= 1),
                name TEXT NOT NULL CHECK (length(name) >= 2),
                description TEXT,
                scope TEXT,
                area_of_use_auth_name TEXT NOT NULL,
                area_of_use_code TEXT NOT NULL,
                deprecated BOOLEAN NOT NULL CHECK (deprecated IN (0, 1)),
                CONSTRAINT pk_vertical_datum PRIMARY KEY (auth_name, code)
            );
            """
            )
            # alias "NAVD88" -> EPSG vertical datum code 5103
            cur.execute(
                """INSERT INTO alias_name VALUES
                           ('vertical_datum', 'EPSG', '5103', 'NAVD88', 'EPSG');
                        """
            )
            # one concrete vertical datum record
            cur.execute(
                """INSERT INTO vertical_datum VALUES
                        ('EPSG', '5101', 'Ordnance Datum Newlyn', NULL, NULL,
                         'EPSG', '2792', '0')"""
            )
            cur.close()
            # resolved through the alias table
            self.assertTrue(self.cf._process_v_datum_str("NAVD88", conn))
            # resolved directly from the vertical_datum table
            self.assertTrue(self.cf._process_v_datum_str("Ordnance Datum Newlyn", conn))
            # NAD83 isn't a vertical datum to begin with, expect failure
            self.assertFalse(self.cf._process_v_datum_str("NAD83", conn))
        finally:
            conn.close()
    def test_check_grid_mapping_crs_wkt(self):
        """Exercise grid-mapping checking of the crs_wkt attribute (type and parseability)."""
        dataset = self.load_dataset(STATIC_FILES["mapping"])
        # deep-copied checker is used for the valid-WKT run so messages
        # accumulated by the failure cases below don't bleed into it
        valid_crs_check = copy.deepcopy(self.cf)
        dataset.variables["wgs84"] = MockVariable(dataset.variables["wgs84"])
        # non-string crs_wkt must be flagged
        dataset.variables["wgs84"].crs_wkt = 1
        results = self.cf.check_grid_mapping(dataset)
        score, out_of, messages = get_results(results)
        self.assertIn("crs_wkt attribute must be a string", messages)
        # test with an invalid OGC CRS WKT string
        dataset.variables["wgs84"].crs_wkt = "EPSG:3785"
        results = self.cf.check_grid_mapping(dataset)
        # reuses and appends to old messages, but this is OK since we only need
        # to check that the invalid CRS string message was added
        score, out_of, messages = get_results(results)
        begin_crs_err_msg = "Cannot parse crs_wkt attribute to CRS using Proj4"
        invalid_crs_str = any(s.startswith(begin_crs_err_msg) for s in messages)
        self.assertTrue(invalid_crs_str)
        self.assertIn("crs_wkt attribute must be a string", messages)
        # NOTE(review): this get_results call duplicates the one above and its
        # values are overwritten below -- appears redundant.
        score, out_of, messages = get_results(results)
        valid_crs_wkt = """PROJCS ["OSGB 1936 / British National Grid",
      GEOGCS ["OSGB 1936",
        DATUM ["OSGB 1936", SPHEROID ["Airy 1830", 6377563.396, 299.3249646]],
        PRIMEM ["Greenwich", 0],
        UNIT ["degree", 0.0174532925199433]],
      PROJECTION ["Transverse Mercator"],
      PARAMETER ["False easting", 400000],
      PARAMETER ["False northing", -100000],
      PARAMETER ["Longitude of natural origin", -2.0],
      PARAMETER ["Latitude of natural origin", 49.0],
      PARAMETER ["Scale factor at natural origin", 0.9996012717],
      UNIT ["metre", 1.0]]"""
        dataset.variables["wgs84"].crs_wkt = valid_crs_wkt
        results = valid_crs_check.check_grid_mapping(dataset)
        score, out_of, messages = get_results(results)
        # without false_easting warning in current file
        msg_len = len(
            [
                m
                for m in messages
                if m
                != "false_easting is a required attribute for grid mapping stereographic"
            ]
        )
        self.assertEqual(msg_len, 0)
    def test_check_grid_mapping_coordinates(self):
        """
        Checks that coordinates variables referred to by a grid mapping
        are well-formed and exist.
        """
        dataset = self.load_dataset(STATIC_FILES["grid_mapping_coordinates"])
        # fresh checker copies so each malformed case is scored independently
        valid_grid_mapping = copy.deepcopy(self.cf)
        valid_grid_mapping_2 = copy.deepcopy(self.cf)
        dataset.variables["temp"] = MockVariable(dataset.variables["temp"])
        results = self.cf.check_grid_mapping(dataset)
        # well-formed expression: full score for "temp"
        self.assertEqual(results["temp"].value[0], results["temp"].value[1])
        # stray space before the second colon makes the expression unparseable
        malformed_sep = "crsOSGB: x y : lat lon"
        dataset.variables["temp"].grid_mapping = malformed_sep
        results = valid_grid_mapping.check_grid_mapping(dataset)
        self.assertIn(
            "Could not consume entire grid_mapping expression, please check for well-formedness",
            results["temp"].msgs,
        )
        self.assertLess(*results["temp"].value)
        # referenced coordinate variables y_null/z_null don't exist in the dataset
        malformed_var = "crsOSGB: x y_null z_null"
        dataset.variables["temp"].grid_mapping = malformed_var
        results = valid_grid_mapping_2.check_grid_mapping(dataset)
        self.assertEqual(
            [
                "Coordinate-related variable y_null referenced by grid_mapping variable crsOSGB must exist in this dataset",
                "Coordinate-related variable z_null referenced by grid_mapping variable crsOSGB must exist in this dataset",
            ],
            results["temp"].msgs,
        )
        self.assertLess(*results["temp"].value)
    def test_check_grid_mapping_vert_datum_geoid_name(self):
        """Checks that geoid_name works properly"""
        dataset = self.load_dataset(STATIC_FILES["mapping"])
        dataset.variables["wgs84"] = MockVariable(dataset.variables["wgs84"])
        dataset.variables["wgs84"].geoid_name = "NAVD88"
        dataset.variables["wgs84"].geopotential_datum_name = "WGS84"
        # fresh checker copies so message state doesn't carry across runs
        geoid_name_good = copy.deepcopy(self.cf)
        geopotential_datum_name_bad = copy.deepcopy(self.cf)
        # having both attributes at once is an error
        results = self.cf.check_grid_mapping(dataset)
        score, out_of, messages = get_results(results)
        self.assertIn(
            "Cannot have both 'geoid_name' and 'geopotential_datum_name' attributes in grid mapping variable 'wgs84'",
            messages,
        )
        # only geoid_name present: full score expected
        del dataset.variables["wgs84"].geopotential_datum_name
        results = geoid_name_good.check_grid_mapping(dataset)
        self.assertEqual(*results["wgs84"].value)
        # WGS84 isn't a valid vertical datum name, of course
        dataset.variables["wgs84"].geopotential_datum_name = "WGS84"
        del dataset.variables["wgs84"].geoid_name
        results = geopotential_datum_name_bad.check_grid_mapping(dataset)
        self.assertLess(*results["wgs84"].value)
        self.assertIn(
            "Vertical datum value 'WGS84' for attribute 'geopotential_datum_name' in grid mapping variable 'wgs84' is not valid",
            results["wgs84"].msgs,
        )
    def test_check_conventions_are_cf_1_7(self):
        """Ensure the check_conventions_are_cf_1_7() check works as expected"""
        # missing Conventions attribute -> failure
        with MockTimeSeries() as dataset:
            self.assertFalse(self.cf.check_conventions_version(dataset).value)
        # wrong version string -> failure
        with MockTimeSeries() as dataset:
            dataset.setncattr("Conventions", "CF-1.9999")
            self.assertFalse(self.cf.check_conventions_version(dataset).value)
        # correct version listed among several conventions -> success
        with MockTimeSeries() as dataset:
            dataset.setncattr("Conventions", "CF-1.7, ACDD-1.3")
            self.assertTrue(self.cf.check_conventions_version(dataset).value)
    def test_appendix_d(self):
        """
        CF 1.7
        Appendix D
        As the CF-1.7 dimensionless vertical coordinates dict extends the 1.6 version,
        this test only examines the extensions made there.
        """
        # For each of the listed dimensionless vertical coordinates,
        # verify that the formula_terms match the provided set of terms
        for coordinate_type in ("ocean_s_coordinate_g1", "ocean_s_coordinate_g2"):
            self.assertTrue(
                no_missing_terms(
                    coordinate_type,
                    {"s", "C", "eta", "depth", "depth_c"},
                    dimless_vertical_coordinates_1_7,
                )
            )
    def test_check_dimensionless_vertical_coordinate_1_7(self):
        """
        Unit test for _check_dimensionless_vertical_coordinate_1_7 method.
        """
        deprecated_units = ["level", "layer", "sigma_level"]
        ret_val = []
        # create mock dataset for test; create three variables, one as dimensionless
        with MockTimeSeries() as dataset:
            dataset.createVariable("lev", "d")  # dtype=double, dims=1
            dataset.variables["lev"].setncattr(
                "standard_name", "atmosphere_sigma_coordinate"
            )
            dataset.variables["lev"].setncattr(
                "formula_terms", "sigma: lev ps: PS ptop: PTOP"
            )
            dataset.createVariable("PS", "d", ("time",))  # dtype=double, dims=time
            dataset.createVariable("PTOP", "d", ("time",))  # dtype=double, dims=time
            # run the check
            self.cf._check_dimensionless_vertical_coordinate_1_7(
                dataset,
                "lev",
                deprecated_units,
                ret_val,
                dimless_vertical_coordinates_1_7,
            )
            # one should have failed, as no computed_standard_name is assigned
            score, out_of, messages = get_results(ret_val)
            assert score == 0
            assert out_of == 1
            # this time, assign computed_standard_name
            ret_val = []
            dataset.variables["lev"].setncattr("computed_standard_name", "air_pressure")
            # run the check
            self.cf._check_dimensionless_vertical_coordinate_1_7(
                dataset,
                "lev",
                deprecated_units,
                ret_val,
                dimless_vertical_coordinates_1_7,
            )
            # computed_standard_name is assigned, should pass
            score, out_of, messages = get_results(ret_val)
            assert score == out_of
    def test_dimensionless_vertical(self):
        """
        Section 4.3.2 check, but for CF-1.7 implementation. With the refactor in
        place, these are more of integration tests, but kept here for simplicity.
        """
        # Check affirmative compliance
        dataset = self.load_dataset(STATIC_FILES["dimensionless"])
        dataset.variables["lev"] = MockVariable(dataset.variables["lev"])
        dataset.variables["lev"].computed_standard_name = "air_pressure"
        results = self.cf.check_dimensionless_vertical_coordinates(dataset)
        scored, out_of, messages = get_results(results)
        # three results are returned and all of them pass
        assert len(results) == 3
        assert scored == out_of
        assert all(r.name == u"§4.3 Vertical Coordinate" for r in results)
        # make one variable's computed_standard_name incorrect, one should fail
        dataset.variables["lev"].computed_standard_name = "definitely_not_right"
        results = self.cf.check_dimensionless_vertical_coordinates(dataset)
        scored, out_of, messages = get_results(results)
        assert len(results) == 3
        assert scored < out_of
        assert all(r.name == u"§4.3 Vertical Coordinate" for r in results)
    def test_check_attr_type(self):
        """
        Ensure the _check_attr_type method works as expected: "S" requires a
        string value, "N" a numeric value, and "D" a value whose dtype matches
        the variable's dtype.
        """
        # create a temporary variable and test this only
        nc_obj = MockTimeSeries()
        nc_obj.createVariable("temperature", "d", ("time",))
        nc_obj.variables["temperature"].setncattr("test_att", np.float64(45))
        att_name = "test_att"
        _var = nc_obj.variables["temperature"]
        # first, test all valid checks show that it's valid
        attr = "my_attr_value"  # string
        attr_type = "S"
        result = self.cf._check_attr_type(att_name, attr_type, attr)
        self.assertTrue(result[0])
        attr = np.int64(1)
        attr_type = "N"
        self.assertTrue(self.cf._check_attr_type(att_name, attr_type, attr)[0])
        attr = np.float64(45)
        attr_type = "D"
        self.assertTrue(self.cf._check_attr_type(att_name, attr_type, attr, _var)[0])
        # check failures
        attr = "my_attr_value"
        attr_type = "N"  # should be numeric
        self.assertFalse(self.cf._check_attr_type(att_name, attr_type, attr)[0])
        # np.int was deprecated in NumPy 1.20 and removed in 1.24; a plain
        # Python int exercises the same "numeric where string required" path
        attr = 64
        attr_type = "S"  # should be string
        self.assertFalse(self.cf._check_attr_type(att_name, attr_type, attr)[0])
        nc_obj = MockTimeSeries()
        nc_obj.createVariable("temperature", "d", ("time",))
        nc_obj.variables["temperature"].setncattr("test_att", np.int32(45))
        _var = nc_obj.variables["temperature"]
        attr = np.int32(45)
        attr_type = "D"  # int32 attr vs double variable: dtype mismatch, expect failure
        self.assertFalse(self.cf._check_attr_type(att_name, attr_type, attr, _var)[0])
    def test_check_grid_mapping_attr_condition(self):
        """
        Ensure the CF-1.7 implementation of _check_grid_mapping_attr_condition()
        works as expected.

        Table-driven: each (attribute name, value) pair is run through the
        checker and compared against its expected validity; the failure
        message identifies the offending case.
        """
        # (attribute name, value) pairs expected to validate successfully
        valid_cases = [
            ("horizontal_datum_name", "Monte Mario (Rome)"),
            ("prime_meridian_name", "Athens"),
            ("reference_ellipsoid_name", "Airy 1830"),
            # towgs84 accepts float64 arrays of length 3, 6, or 7
            ("towgs84", np.array([0, 0, 0], dtype=np.float64)),
            ("towgs84", np.array([0, 0, 0, 0, 0, 0], dtype=np.float64)),
            ("towgs84", np.array([0, 0, 0, 0, 0, 0, 0], dtype=np.float64)),
            ("geographic_crs_name", "NAD83(CSRS98)"),
            ("geoid_name", "Mayotte 1950"),
            ("geopotential_datum_name", "NAVD88"),
            ("projected_crs_name", "Anguilla 1957 / British West Indies Grid"),
        ]
        # (attribute name, value) pairs expected to fail validation
        invalid_cases = [
            ("horizontal_datum_name", "bad"),
            ("prime_meridian_name", "bad"),
            ("reference_ellipsoid_name", "goofy goober"),
            ("towgs84", np.array([0, 0, 0], dtype=np.int64)),  # len 3, wrong dtype
            ("towgs84", np.array([0, 0, 0, 0], dtype=np.int64)),  # len 4
            ("towgs84", np.float64(0)),  # single value, right dtype
            ("geographic_crs_name", "badbadbadbadbadnotinhere"),
            ("geoid_name", "yooooooo"),
            ("geopotential_datum_name", "NAVBAD BAD"),
            ("projected_crs_name", "Teddy Bruschi"),
        ]
        for att_name, att in valid_cases:
            res = self.cf._check_grid_mapping_attr_condition(att, att_name)
            self.assertTrue(res[0], msg="{}: {!r}".format(att_name, att))
        for att_name, att in invalid_cases:
            res = self.cf._check_grid_mapping_attr_condition(att, att_name)
            self.assertFalse(res[0], msg="{}: {!r}".format(att_name, att))
    def test_check_gmattr_existence_condition_geoid_name_geoptl_datum_name(self):
        """Either geoid_name or geopotential_datum_name may be set, not both."""
        # (attributes to set on the variable, whether the check should pass)
        scenarios = [
            ({"geoid_name": "blah"}, True),
            ({"geopotential_datum_name": "blah"}, True),
            # setting both attributes at once violates the either-or rule
            ({"geopotential_datum_name": "blah", "geoid_name": "blah"}, False),
        ]
        for attributes, should_pass in scenarios:
            dataset = MockTimeSeries()
            dataset.createVariable("lev", "d")  # dtype=double, dims=1
            for attr_name, attr_value in attributes.items():
                dataset.variables["lev"].setncattr(attr_name, attr_value)
            res = self.cf._check_gmattr_existence_condition_geoid_name_geoptl_datum_name(
                dataset.variables["lev"]
            )
            if should_pass:
                self.assertTrue(res[0])
            else:
                self.assertFalse(res[0])
            dataset.close()
    def test_check_gmattr_existence_condition_ell_pmerid_hdatum(self):
        """All three of ellipsoid/prime-meridian/horizontal-datum names are required."""
        required_attrs = (
            "reference_ellipsoid_name",
            "prime_meridian_name",
            "horizontal_datum_name",
        )
        # setting the full trio passes; any strict prefix of it fails
        for attr_count, should_pass in ((3, True), (1, False), (2, False)):
            dataset = MockTimeSeries()
            dataset.createVariable("lev", "d")  # dtype=double, dims=1
            for attr_name in required_attrs[:attr_count]:
                dataset.variables["lev"].setncattr(attr_name, "blah")
            res = self.cf._check_gmattr_existence_condition_ell_pmerid_hdatum(
                dataset.variables["lev"]
            )
            if should_pass:
                self.assertTrue(res[0])
            else:
                self.assertFalse(res[0])
            dataset.close()
    def test_check_add_offset_scale_factor_type(self):
        """Validate type checking of add_offset/scale_factor attributes.

        String values and numeric types that do not match the expectations of
        the check (e.g. float32 / unsigned int on a double variable) must be
        rejected; plain float/double values — or a value matching the
        variable's own dtype — must be accepted.
        """
        dataset = MockTimeSeries()  # time lat lon depth
        temp = dataset.createVariable("temp", "d", dimensions=("time",))
        # set att bad (str)
        temp.setncattr("add_offset", "foo")
        r = self.cf._check_add_offset_scale_factor_type(temp, "add_offset")
        self.assertFalse(r.value)
        temp.setncattr("scale_factor", "foo")
        r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor")
        self.assertFalse(r.value)
        # set bad np val
        temp.setncattr("scale_factor", np.float32(5))
        r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor")
        self.assertFalse(r.value)
        temp.setncattr("scale_factor", np.uint(5))
        r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor")
        self.assertFalse(r.value)
        # set good
        # NOTE: the original used np.float/np.int, which were merely aliases
        # for the Python builtins and were removed in NumPy 1.24; using the
        # builtins directly is behavior-identical and future-proof.
        temp.setncattr("scale_factor", float(5))
        r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor")
        self.assertTrue(r.value)
        temp.setncattr("scale_factor", np.double(5))
        r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor")
        self.assertTrue(r.value)
        # set same dtype
        dataset = MockTimeSeries()  # time lat lon depth
        temp = dataset.createVariable("temp", int, dimensions=("time",))
        temp.setncattr("scale_factor", int(5))
        r = self.cf._check_add_offset_scale_factor_type(temp, "scale_factor")
        self.assertTrue(r.value)
class TestCFUtil(BaseTestCase):
    """
    Class to test the cfutil module.
    """

    @staticmethod
    def _make_var(nc, name, dtype, dimension, attrs=None):
        """Create variable *name* on *nc* over (*dimension*,) and set *attrs*.

        Small helper extracted to remove the heavy createVariable/setncattr
        duplication across the ragged-array-representation test cases.
        """
        var = nc.createVariable(name, dtype, (dimension,), fill_value=None)
        for attr_name, attr_value in (attrs or {}).items():
            var.setncattr(attr_name, attr_value)
        return var

    def test_is_variable_valid_ragged_array_repr_featureType(self):
        """A variable is valid only if geophysical and on the sample dimension."""
        # a variable that isn't recognized as geophysical is rejected
        nc = MockRaggedArrayRepr("timeseries", "indexed")
        self._make_var(nc, "data1", "d", "SAMPLE_DIMENSION", {"cf_role": "blah"})
        self.assertFalse(
            cfutil.is_variable_valid_ragged_array_repr_featureType(nc, "data1")
        )
        # a geophysical variable with the correct dimension passes
        nc = MockRaggedArrayRepr("timeseries", "indexed")
        self._make_var(
            nc,
            "data1",
            "d",
            "SAMPLE_DIMENSION",
            {"standard_name": "sea_water_pressure"},
        )
        self.assertTrue(
            cfutil.is_variable_valid_ragged_array_repr_featureType(nc, "data1")
        )
        # a good variable passes while one on the instance dimension fails
        nc = MockRaggedArrayRepr("timeseries", "indexed")
        self._make_var(
            nc,
            "data1",
            "d",
            "SAMPLE_DIMENSION",
            {"standard_name": "sea_water_pressure"},
        )
        self._make_var(
            nc,
            "data2",
            "d",
            "INSTANCE_DIMENSION",  # improper dimension for a data variable
            {"standard_name": "sea_water_salinity"},
        )
        self.assertTrue(
            cfutil.is_variable_valid_ragged_array_repr_featureType(nc, "data1")
        )
        self.assertFalse(
            cfutil.is_variable_valid_ragged_array_repr_featureType(nc, "data2")
        )

    def test_is_dataset_valid_ragged_array_repr_featureType(self):
        """Validate simple and compound featureTypes in ragged-array form.

        Each scenario starts from a fresh MockRaggedArrayRepr; the original
        copy-pasted every (featureType, representation) combination, which is
        collapsed into loops here with identical assertions.
        """
        # ----- single featureTypes: each type, indexed and contiguous ----- #
        for ftype in ("timeseries", "profile", "trajectory"):
            # pristine indexed representation is valid
            nc = MockRaggedArrayRepr(ftype, "indexed")
            self.assertTrue(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, ftype)
            )
            # a second cf_role variable invalidates the dataset
            nc = MockRaggedArrayRepr(ftype, "indexed")
            self._make_var(
                nc, "var2", "i", "INSTANCE_DIMENSION", {"cf_role": "yeetyeet_id"}
            )
            self.assertFalse(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, ftype)
            )
            # a second index variable is also invalid
            nc = MockRaggedArrayRepr(ftype, "indexed")
            self._make_var(
                nc,
                "index_var2",
                "i",
                "SAMPLE_DIMENSION",
                {"instance_dimension": "INSTANCE_DIMENSION"},
            )
            self.assertFalse(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, ftype)
            )
            # pristine contiguous representation is valid
            nc = MockRaggedArrayRepr(ftype, "contiguous")
            self.assertTrue(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, ftype)
            )
            # a second cf_role variable invalidates the dataset
            nc = MockRaggedArrayRepr(ftype, "contiguous")
            self._make_var(
                nc, "var2", "i", "INSTANCE_DIMENSION", {"cf_role": "yeetyeet_id"}
            )
            self.assertFalse(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, ftype)
            )
            # a second count variable is also invalid
            nc = MockRaggedArrayRepr(ftype, "contiguous")
            self._make_var(
                nc,
                "count_var2",
                "i",
                "INSTANCE_DIMENSION",
                {"sample_dimension": "SAMPLE_DIMENSION"},
            )
            self.assertFalse(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, ftype)
            )

        # ----- compound featureTypes ----- #
        for mock_ftype, query_ftype in (
            ("timeSeriesProfile", "timeseriesprofile"),
            ("trajectoryProfile", "trajectoryprofile"),
        ):
            # NOTE: with no geophysical variables the dataset fails
            nc = MockRaggedArrayRepr(mock_ftype)
            self.assertFalse(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, query_ftype)
            )
            # adding a geophysical variable makes it pass
            nc = MockRaggedArrayRepr(mock_ftype)
            self._make_var(
                nc, "data1", "i", "SAMPLE_DIMENSION", {"standard_name": "pressure"}
            )
            self.assertTrue(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, query_ftype)
            )
            # a third cf_role variable must fail
            nc = MockRaggedArrayRepr(mock_ftype)
            self._make_var(nc, "data1", "i", "SAMPLE_DIMENSION")
            self._make_var(
                nc,
                "cf_role_var3",
                "i",
                "INSTANCE_DIMENSION",
                {"cf_role": "yeetyeet_id"},
            )
            self.assertFalse(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, query_ftype)
            )
            # an incorrect instance_dimension attr on the station index variable
            nc = MockRaggedArrayRepr(mock_ftype)
            self._make_var(nc, "data1", "i", "SAMPLE_DIMENSION")
            nc.variables["station_index_variable"].instance_dimension = "SIKE!"
            self.assertFalse(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, query_ftype)
            )
            # a wrong sample_dimension attr on the count variable
            nc = MockRaggedArrayRepr(mock_ftype)
            self._make_var(nc, "data1", "i", "SAMPLE_DIMENSION")
            nc.variables["counter_var"].sample_dimension = "SIKE!"
            self.assertFalse(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, query_ftype)
            )
            # a second geophysical variable on the wrong dimension
            nc = MockRaggedArrayRepr(mock_ftype)
            self._make_var(nc, "data1", "i", "SAMPLE_DIMENSION")
            self._make_var(nc, "data2", "i", "STATION_DIMENSION")  # bad!
            self.assertFalse(
                cfutil.is_dataset_valid_ragged_array_repr_featureType(nc, query_ftype)
            )
 | 
	apache-2.0 | -7,127,888,017,928,970,000 | 38.029314 | 181 | 0.592837 | false | 
| 
	zjuchenyuan/BioWeb | 
	Lib/requests/packages/__init__.py | 
	61 | 
	1494 | 
'''
Debian and other distributions "unbundle" requests' vendored dependencies, and
rewrite all imports to use the global versions of ``urllib3`` and ``chardet``.
The problem with this is that not only requests itself imports those
dependencies, but third-party code outside of the distros' control too.
In reaction to these problems, the distro maintainers replaced
``requests.packages`` with a magical "stub module" that imports the correct
modules. The implementations were varying in quality and all had severe
problems. For example, a symlink (or hardlink) that links the correct modules
into place introduces problems regarding object identity, since you now have
two modules in `sys.modules` with the same API, but different identities::
    requests.packages.urllib3 is not urllib3
With version ``2.5.2``, requests started to maintain its own stub, so that
distro-specific breakage would be reduced to a minimum, even though the whole
issue is not requests' fault in the first place. See
https://github.com/kennethreitz/requests/pull/2375 for the corresponding pull
request.
'''
from __future__ import absolute_import
import sys
# Prefer the bundled (vendored) copy; on unbundled distro installs fall back
# to the system-wide package and alias it in sys.modules under the vendored
# name so ``import requests.packages.urllib3`` resolves either way.
try:
    from . import urllib3
except ImportError:
    import urllib3
    sys.modules['%s.urllib3' % __name__] = urllib3
# Same fallback-and-alias treatment for chardet.
try:
    from . import chardet
except ImportError:
    import chardet
    sys.modules['%s.chardet' % __name__] = chardet
# ... and for idna.
try:
    from . import idna
except ImportError:
    import idna
    sys.modules['%s.idna' % __name__] = idna
 | 
	mit | -3,740,854,245,673,782,000 | 34.571429 | 78 | 0.754351 | false | 
| 
	pschmitt/home-assistant | 
	tests/components/local_file/test_camera.py | 
	21 | 
	5287 | 
	"""The tests for local file camera component."""
from unittest import mock
from homeassistant.components.local_file.const import DOMAIN, SERVICE_UPDATE_FILE_PATH
from homeassistant.setup import async_setup_component
from tests.common import mock_registry
async def test_loading_file(hass, hass_client):
    """Test that it loads image from disk."""
    mock_registry(hass)
    # Pretend the configured file exists and is readable during setup.
    file_exists = mock.patch("os.path.isfile", mock.Mock(return_value=True))
    file_readable = mock.patch("os.access", mock.Mock(return_value=True))
    with file_exists, file_readable:
        camera_config = {
            "name": "config_test",
            "platform": "local_file",
            "file_path": "mock.file",
        }
        await async_setup_component(hass, "camera", {"camera": camera_config})
        await hass.async_block_till_done()
    client = await hass_client()
    # Serve fixed bytes instead of touching the filesystem.
    mocked_open = mock.mock_open(read_data=b"hello")
    with mock.patch(
        "homeassistant.components.local_file.camera.open", mocked_open, create=True
    ):
        resp = await client.get("/api/camera_proxy/camera.config_test")
    assert resp.status == 200
    assert await resp.text() == "hello"
async def test_file_not_readable(hass, caplog):
    """Test a warning is shown setup when file is not readable."""
    mock_registry(hass)
    # File exists but os.access reports it as unreadable.
    file_exists = mock.patch("os.path.isfile", mock.Mock(return_value=True))
    file_unreadable = mock.patch("os.access", mock.Mock(return_value=False))
    with file_exists, file_unreadable:
        camera_config = {
            "name": "config_test",
            "platform": "local_file",
            "file_path": "mock.file",
        }
        await async_setup_component(hass, "camera", {"camera": camera_config})
        await hass.async_block_till_done()
    # The warning must name both the entity and the offending path.
    for expected_fragment in ("Could not read", "config_test", "mock.file"):
        assert expected_fragment in caplog.text
async def test_camera_content_type(hass, hass_client):
    """Test local_file camera content_type."""
    # (entity name, configured path, expected Content-Type of the proxy reply)
    specs = [
        ("test_jpg", "/path/to/image.jpg", "image/jpeg"),
        ("test_png", "/path/to/image.png", "image/png"),
        ("test_svg", "/path/to/image.svg", "image/svg+xml"),
        # a path with no extension falls back to the default mime type
        ("test_no_ext", "/path/to/image", "image/jpeg"),
    ]
    await async_setup_component(
        hass,
        "camera",
        {
            "camera": [
                {"name": name, "platform": "local_file", "file_path": path}
                for name, path, _ in specs
            ]
        },
    )
    await hass.async_block_till_done()
    client = await hass_client()
    image = "hello"
    mocked_open = mock.mock_open(read_data=image.encode())
    with mock.patch(
        "homeassistant.components.local_file.camera.open", mocked_open, create=True
    ):
        responses = [
            await client.get("/api/camera_proxy/camera.{}".format(name))
            for name, _, _ in specs
        ]
    for resp, (_, _, expected_type) in zip(responses, specs):
        assert resp.status == 200
        assert resp.content_type == expected_type
        assert await resp.text() == image
async def test_update_file_path(hass):
    """Test update_file_path service."""
    # Setup platform with two local_file cameras.
    mock_registry(hass)
    with mock.patch("os.path.isfile", mock.Mock(return_value=True)), mock.patch(
        "os.access", mock.Mock(return_value=True)
    ):
        await async_setup_component(
            hass,
            "camera",
            {
                "camera": [
                    {"platform": "local_file", "file_path": "mock/path.jpg"},
                    {
                        "platform": "local_file",
                        "name": "local_file_camera_2",
                        "file_path": "mock/path_2.jpg",
                    },
                ]
            },
        )
        await hass.async_block_till_done()
        # Verify the default entity exposes its configured path.
        state = hass.states.get("camera.local_file")
        assert state.attributes.get("friendly_name") == "Local File"
        assert state.attributes.get("file_path") == "mock/path.jpg"
        # Point the first camera at a new path via the service call.
        await hass.services.async_call(
            DOMAIN,
            SERVICE_UPDATE_FILE_PATH,
            {"entity_id": "camera.local_file", "file_path": "new/path.jpg"},
        )
        await hass.async_block_till_done()
        state = hass.states.get("camera.local_file")
        assert state.attributes.get("file_path") == "new/path.jpg"
        # The second camera keeps its originally configured path.
        state = hass.states.get("camera.local_file_camera_2")
        assert state.attributes.get("file_path") == "mock/path_2.jpg"
 | 
	apache-2.0 | 7,287,199,300,171,845,000 | 30.849398 | 87 | 0.584642 | false | 
| 
	emrah-b/oclapi | 
	django-nonrel/ocl/integration_tests/tests/bulk_import_validation.py | 
	4 | 
	13005 | 
	from django.contrib.auth.models import User
from concepts.importer import ConceptsImporter, ValidationLogger
from concepts.validation_messages import OPENMRS_NAMES_EXCEPT_SHORT_MUST_BE_UNIQUE, OPENMRS_MUST_HAVE_EXACTLY_ONE_PREFERRED_NAME, \
    OPENMRS_SHORT_NAME_CANNOT_BE_PREFERRED, OPENMRS_PREFERRED_NAME_UNIQUE_PER_SOURCE_LOCALE, \
    OPENMRS_AT_LEAST_ONE_FULLY_SPECIFIED_NAME, OPENMRS_FULLY_SPECIFIED_NAME_UNIQUE_PER_SOURCE_LOCALE
from concepts.models import Concept, ConceptVersion
from concepts.tests import ConceptBaseTest
from integration_tests.models import TestStream
from mappings.importer import MappingsImporter
from mappings.models import Mapping
from mappings.models import MappingVersion
from mappings.tests import MappingBaseTest
from sources.models import SourceVersion
from oclapi.models import CUSTOM_VALIDATION_SCHEMA_OPENMRS, LOOKUP_CONCEPT_CLASSES
from test_helper.base import create_source, create_user, create_concept
class BulkConceptImporterTest(ConceptBaseTest):
    """Tests for bulk concept import against OpenMRS validation rules.

    Each test feeds a JSON fixture through ConceptsImporter into a source
    configured with CUSTOM_VALIDATION_SCHEMA_OPENMRS and checks which
    validation messages land on the stderr stub / validation logger.
    """
    def setUp(self):
        super(BulkConceptImporterTest, self).setUp()
        # A superuser must exist for the importer machinery; the value of the
        # credentials is irrelevant to these tests.
        User.objects.create(
            username='superuser',
            password='superuser',
            email='[email protected]',
            last_name='Super',
            first_name='User',
            is_superuser=True
        )
    def test_import_single_concept_without_fully_specified_name(self):
        """A concept lacking a fully specified name must be rejected."""
        self.testfile = open('./integration_tests/fixtures/concept_without_fully_specified_name.json', 'rb')
        stderr_stub = TestStream()
        source = create_source(self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
        importer = ConceptsImporter(source, self.testfile, 'test', TestStream(), stderr_stub, save_validation_errors=False)
        importer.import_concepts(total=1)
        self.assertTrue(OPENMRS_AT_LEAST_ONE_FULLY_SPECIFIED_NAME in stderr_stub.getvalue())
    def test_import_concepts_with_invalid_records(self):
        """Invalid records are skipped with errors; valid ones are persisted."""
        self.testfile = open('./integration_tests/fixtures/valid_invalid_concepts.json', 'rb')
        stderr_stub = TestStream()
        source = create_source(self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
        importer = ConceptsImporter(source, self.testfile, 'test', TestStream(), stderr_stub, save_validation_errors=False)
        importer.import_concepts(total=7)
        self.assertTrue(OPENMRS_AT_LEAST_ONE_FULLY_SPECIFIED_NAME in stderr_stub.getvalue())
        self.assertTrue(OPENMRS_FULLY_SPECIFIED_NAME_UNIQUE_PER_SOURCE_LOCALE in stderr_stub.getvalue())
        # 5 of the 7 fixture records are valid and should be stored
        # (lookup-class concepts are excluded from the count).
        self.assertEquals(5, Concept.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())
        self.assertEquals(5, ConceptVersion.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())
    def test_update_concept_with_invalid_record(self):
        """An invalid update must not add versions to an existing concept."""
        (concept, _) = create_concept(mnemonic='1', user=self.user1, source=self.source1, names=[self.name])
        self.testfile = open('./integration_tests/fixtures/concept_without_fully_specified_name.json', 'rb')
        stderr_stub = TestStream()
        source = create_source(self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
        importer = ConceptsImporter(source, self.testfile, 'test', TestStream(), stderr_stub, save_validation_errors=False)
        importer.import_concepts(total=1)
        self.assertTrue(OPENMRS_AT_LEAST_ONE_FULLY_SPECIFIED_NAME in stderr_stub.getvalue())
        # Still only the pre-existing concept and its single version remain.
        self.assertEquals(1, Concept.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())
        self.assertEquals(1, ConceptVersion.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())
    def test_import_concepts_into_openmrs_validated_source_with_valid_records(self):
        """Only the 2 records satisfying all OpenMRS name rules are imported."""
        test_file = open('./integration_tests/fixtures/concepts_for_openmrs_validation.json', 'rb')
        stderr_stub = TestStream()
        user = create_user()
        source = create_source(user, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
        importer = ConceptsImporter(source, test_file, 'test', TestStream(), stderr_stub, save_validation_errors=False)
        importer.import_concepts(total=5)
        self.assertTrue(OPENMRS_MUST_HAVE_EXACTLY_ONE_PREFERRED_NAME in stderr_stub.getvalue())
        self.assertTrue(OPENMRS_SHORT_NAME_CANNOT_BE_PREFERRED in stderr_stub.getvalue())
        self.assertTrue(OPENMRS_SHORT_NAME_CANNOT_BE_PREFERRED in stderr_stub.getvalue())
        self.assertTrue(OPENMRS_NAMES_EXCEPT_SHORT_MUST_BE_UNIQUE in stderr_stub.getvalue())
        self.assertEquals(2, Concept.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())
        self.assertEquals(2, ConceptVersion.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES).count())
    def test_validation_error_file_output(self):
        """Validation errors are written as CSV rows (MNEMONIC;ERROR;JSON)."""
        self.testfile = open('./integration_tests/fixtures/valid_invalid_concepts.json', 'rb')
        stderr_stub = TestStream()
        logger = ValidationLogger(output=TestStream())
        source = create_source(self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
        importer = ConceptsImporter(source, self.testfile, 'test', TestStream(), stderr_stub, validation_logger=logger)
        importer.import_concepts(total=7)
        self.assertTrue('MNEMONIC;ERROR;JSON' in logger.output.getvalue())
        self.assertTrue('4;%s' % OPENMRS_AT_LEAST_ONE_FULLY_SPECIFIED_NAME  in logger.output.getvalue())
        self.assertTrue('7;%s' % OPENMRS_FULLY_SPECIFIED_NAME_UNIQUE_PER_SOURCE_LOCALE  in logger.output.getvalue())
    def test_validation_error_file_exists(self):
        """When given a file name, the logger materializes the error file."""
        self.testfile = open('./integration_tests/fixtures/valid_invalid_concepts.json', 'rb')
        stderr_stub = TestStream()
        output_file_name = 'test_file.csv'
        logger = ValidationLogger(output_file_name=output_file_name)
        importer = ConceptsImporter(create_source(user=self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS), self.testfile, 'test', TestStream(), stderr_stub,
                                    validation_logger=logger)
        importer.import_concepts(total=7)
        from os import path, remove
        self.assertTrue(path.exists(output_file_name))
        # clean up the artifact so repeated runs stay isolated
        remove(output_file_name)
class ConceptImporterTest(ConceptBaseTest):
    """Integration tests for ConceptsImporter happy paths: creating a new
    concept and updating an existing one from a JSON fixture."""

    def setUp(self):
        super(ConceptImporterTest, self).setUp()
        User.objects.create(
            username='superuser',
            password='superuser',
            email='[email protected]',
            last_name='Super',
            first_name='User',
            is_superuser=True
        )
        self.testfile = open('./integration_tests/fixtures/one_concept.json', 'rb')
        # Close the fixture file even if a test fails (the handle was
        # previously leaked because nothing ever closed it).
        self.addCleanup(self.testfile.close)

    def test_import_job_for_one_record(self):
        """Importing one concept creates it and registers its version on the
        latest source version."""
        stdout_stub = TestStream()
        importer = ConceptsImporter(self.source1, self.testfile, 'test', stdout_stub, TestStream(), save_validation_errors=False)
        importer.import_concepts(total=1)
        self.assertTrue('Created new concept: 1 = Diagnosis' in stdout_stub.getvalue())
        self.assertTrue('Finished importing concepts!' in stdout_stub.getvalue())
        inserted_concept = Concept.objects.get(mnemonic='1')
        self.assertEquals(inserted_concept.parent, self.source1)
        inserted_concept_version = ConceptVersion.objects.get(versioned_object_id=inserted_concept.id)
        source_version_latest = SourceVersion.get_latest_version_of(self.source1)
        self.assertEquals(source_version_latest.concepts, [inserted_concept_version.id])

    def test_import_job_for_change_in_data(self):
        """Importing over an existing concept produces an updated version
        that supersedes the previous one."""
        stdout_stub = TestStream()
        create_concept(mnemonic='1', user=self.user1, source=self.source1)
        importer = ConceptsImporter(self.source1, self.testfile, 'test', stdout_stub, TestStream(), save_validation_errors=False)
        importer.import_concepts(total=1)
        # Two versions exist: the original and the update (lookup classes excluded).
        all_concept_versions = ConceptVersion.objects.exclude(concept_class__in=LOOKUP_CONCEPT_CLASSES)
        self.assertEquals(len(all_concept_versions), 2)
        latest_concept_version = [version for version in all_concept_versions if version.previous_version][0]
        self.assertEquals(len(latest_concept_version.names), 4)
        self.assertTrue(('Updated concept, replacing version ID ' + latest_concept_version.previous_version.id) in stdout_stub.getvalue())
        self.assertTrue('**** Processed 1 out of 1 concepts - 1 updated, ****' in stdout_stub.getvalue())
class MappingImporterTest(MappingBaseTest):
    def setUp(self):
        """Create a superuser and open the single-mapping fixture shared by the tests."""
        super(MappingImporterTest, self).setUp()
        User.objects.create(
            username='superuser',
            password='superuser',
            email='[email protected]',
            last_name='Super',
            first_name='User',
            is_superuser=True
        )
        self.testfile = open('./integration_tests/fixtures/one_mapping.json', 'rb')
        # Close the fixture file even if a test fails (previously leaked).
        self.addCleanup(self.testfile.close)
    def test_import_job_for_one_record(self):
        """Importing a single mapping creates it and puts its latest version
        on the head source version."""
        out_stream, err_stream = TestStream(), TestStream()
        MappingsImporter(self.source1, self.testfile, out_stream, err_stream, 'test').import_mappings(total=1)
        output = out_stream.getvalue()
        self.assertIn('Created new mapping:', output)
        self.assertIn('/users/user1/sources/source1/:413532003', output)
        created = Mapping.objects.get(to_concept_code='413532003')
        self.assertEquals(self.source1, created.to_source)
        self.assertEquals(self.source2, created.from_source)
        latest_version = MappingVersion.objects.get(versioned_object_id=created.id, is_latest_version=True)
        head_mapping_ids = SourceVersion.get_latest_version_of(self.source1).mappings
        self.assertEquals(head_mapping_ids[0], latest_version.id)
    def test_import_job_for_one_invalid_record(self):
        """A self-referential mapping must be rejected with an error on stderr."""
        stdout_stub = TestStream()
        stderr_stub = TestStream()
        # Context manager closes the fixture handle even if the import raises
        # (the original code leaked the file object).
        with open('./integration_tests/fixtures/one_invalid_mapping.json', 'rb') as invalid_json_file:
            importer = MappingsImporter(self.source1, invalid_json_file, stdout_stub, stderr_stub, 'test')
            importer.import_mappings(total=1)
        self.assertTrue('Cannot map concept to itself.' in stderr_stub.getvalue())
    def test_import_job_for_change_in_data(self):
        """Re-importing an existing mapping updates it in place: the record
        is retired and its external_id replaced from the fixture."""
        out_stream = TestStream()
        err_stream = TestStream()
        existing = Mapping(
            parent=self.source1,
            map_type='SAME-AS',
            from_concept=self.concept3,
            to_source=self.source1,
            to_concept_code='413532003',
            external_id='junk'
        )
        Mapping.persist_new(existing, self.user1, parent_resource=self.source1)
        head_version = SourceVersion.get_latest_version_of(self.source1)
        head_version.mappings = [existing.id]
        head_version.save()
        MappingsImporter(self.source1, self.testfile, out_stream, err_stream, 'test').import_mappings(total=1)
        output = out_stream.getvalue()
        self.assertIn('**** Processed 1 out of 1 mappings - 1 updated, ****', output)
        self.assertIn('Updated mapping with ID ' + existing.id, output)
        refreshed = Mapping.objects.get(to_concept_code='413532003')
        self.assertTrue(refreshed.retired)
        self.assertEquals('70279ABBBBBBBBBBBBBBBBBBBBBBBBBBBBBB', refreshed.external_id)
    def test_update_mapping_with_invalid_record(self):
        """Updating a mapping with both to_concept and to_source/to_concept_code
        set must be rejected with an error on stderr."""
        mapping = Mapping(
            parent=self.source1,
            map_type='SAME-AS',
            from_concept=self.concept3,
            to_concept=self.concept1
        )
        Mapping.persist_new(mapping, self.user1, parent_resource=self.source1)
        source_version = SourceVersion.get_latest_version_of(self.source1)
        source_version.mappings = [mapping.id]
        source_version.save()
        stderr_stub = TestStream()
        # Context manager closes the fixture handle even on assertion failure
        # (previously the file object was leaked).
        with open('./integration_tests/fixtures/one_internal_invalid_mapping.json', 'rb') as invalid_json_file:
            importer = MappingsImporter(self.source1, invalid_json_file, TestStream(), stderr_stub, 'test')
            importer.import_mappings(total=1)
        self.assertTrue(
            "Must specify either 'to_concept' or 'to_source' & 'to_concept_code'. Cannot specify both." in stderr_stub.getvalue())
    def test_import_valid_invalid_mappings(self):
        stdout_stub = TestStream()
        stderr_stub = TestStream()
        invalid_json_file = open('./integration_tests/fixtures/valid_invalid_mapping.json', 'rb')
        importer = MappingsImporter(self.source1, invalid_json_file, stdout_stub, stderr_stub, 'test')
        importer.import_mappings(total=5)
        self.assertTrue('Cannot map concept to itself.' in stderr_stub.getvalue())
        self.assertTrue("Must specify either 'to_concept' or 'to_source' & " in stderr_stub.getvalue())
        self.assertEquals(3, Mapping.objects.count())
        self.assertEquals(3, MappingVersion.objects.count()) | 
	mpl-2.0 | -2,314,512,562,089,775,000 | 52.085714 | 169 | 0.690734 | false | 
| 
	IllusionRom-deprecated/android_platform_external_chromium_org_tools_grit | 
	grit/node/misc_unittest.py | 
	7 | 
	15597 | 
	#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for misc.GritNode'''
import os
import sys
if __name__ == '__main__':
  sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import StringIO
from grit import grd_reader
import grit.exception
from grit import util
from grit.format import rc
from grit.node import misc
class GritNodeUnittest(unittest.TestCase):
  """Unit tests for top-level <grit> node behavior and helpers."""

  def testUniqueNameAttribute(self):
    """Parsing a grd file with duplicate message names must raise Parsing."""
    # assertRaises replaces the original try/self.fail/except boilerplate and
    # no longer leaves an unused local (restree) behind.
    self.assertRaises(
        grit.exception.Parsing,
        grd_reader.Parse,
        util.PathFromRoot('grit/testdata/duplicate-name-input.xml'))

  def testReadFirstIdsFromFile(self):
    """_ReadFirstIdsFromFile resolves placeholder roots (FOO,
    SHARED_INTERMEDIATE_DIR) and strips the source-dir prefix from grd paths."""
    test_resource_ids = os.path.join(os.path.dirname(__file__), '..',
                                     'testdata', 'resource_ids')
    base_dir = os.path.dirname(test_resource_ids)
    src_dir, id_dict = misc._ReadFirstIdsFromFile(
        test_resource_ids,
        {
          'FOO': os.path.join(base_dir, 'bar'),
          'SHARED_INTERMEDIATE_DIR': os.path.join(base_dir,
                                                  'out/Release/obj/gen'),
        })
    # Both grd paths from the resource_ids file must be present as keys
    # (a missing key would yield the None default instead of {}).
    self.assertEqual({}, id_dict.get('bar/file.grd', None))
    self.assertEqual({},
        id_dict.get('out/Release/obj/gen/devtools/devtools.grd', None))
class IfNodeUnittest(unittest.TestCase):
  def testIffyness(self):
    """<if expr="..."> nodes toggle their children on and off according to
    the output language, the defines, and the target platform."""
    grd = grd_reader.Parse(StringIO.StringIO('''
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <messages>
            <if expr="'bingo' in defs">
              <message name="IDS_BINGO">
                Bingo!
              </message>
            </if>
            <if expr="'hello' in defs">
              <message name="IDS_HELLO">
                Hello!
              </message>
            </if>
            <if expr="lang == 'fr' or 'FORCE_FRENCH' in defs">
              <message name="IDS_HELLO" internal_comment="French version">
                Good morning
              </message>
            </if>
            <if expr="is_win">
              <message name="IDS_ISWIN">is_win</message>
            </if>
          </messages>
        </release>
      </grit>'''), dir='.')
    # Each <message> is the first child of its wrapping <if> node.
    messages_node = grd.children[0].children[0]
    bingo_message = messages_node.children[0].children[0]
    hello_message = messages_node.children[1].children[0]
    french_message = messages_node.children[2].children[0]
    is_win_message = messages_node.children[3].children[0]
    self.assertTrue(bingo_message.name == 'message')
    self.assertTrue(hello_message.name == 'message')
    self.assertTrue(french_message.name == 'message')
    # French output + 'hello' define: hello and the French message are active.
    grd.SetOutputLanguage('fr')
    grd.SetDefines({'hello': '1'})
    active = set(grd.ActiveDescendants())
    self.failUnless(bingo_message not in active)
    self.failUnless(hello_message in active)
    self.failUnless(french_message in active)
    # English output + 'bingo' define: only the bingo message is active.
    grd.SetOutputLanguage('en')
    grd.SetDefines({'bingo': 1})
    active = set(grd.ActiveDescendants())
    self.failUnless(bingo_message in active)
    self.failUnless(hello_message not in active)
    self.failUnless(french_message not in active)
    # FORCE_FRENCH activates the French message even for English output.
    grd.SetOutputLanguage('en')
    grd.SetDefines({'FORCE_FRENCH': '1', 'bingo': '1'})
    active = set(grd.ActiveDescendants())
    self.failUnless(bingo_message in active)
    self.failUnless(hello_message not in active)
    self.failUnless(french_message in active)
    # The target platform defaults to the host platform and gates is_win.
    grd.SetOutputLanguage('en')
    grd.SetDefines({})
    self.failUnless(grd.target_platform == sys.platform)
    grd.SetTargetPlatform('darwin')
    active = set(grd.ActiveDescendants())
    self.failUnless(is_win_message not in active)
    grd.SetTargetPlatform('win32')
    active = set(grd.ActiveDescendants())
    self.failUnless(is_win_message in active)
  def testElsiness(self):
    """<then>/<else> handling: exactly the branch matching the <if>
    expression contributes active messages; empty branches are legal."""
    grd = util.ParseGrdForUnittest('''
        <messages>
          <if expr="True">
            <then> <message name="IDS_YES1"></message> </then>
            <else> <message name="IDS_NO1"></message> </else>
          </if>
          <if expr="True">
            <then> <message name="IDS_YES2"></message> </then>
            <else> </else>
          </if>
          <if expr="True">
            <then> </then>
            <else> <message name="IDS_NO2"></message> </else>
          </if>
          <if expr="True">
            <then> </then>
            <else> </else>
          </if>
          <if expr="False">
            <then> <message name="IDS_NO3"></message> </then>
            <else> <message name="IDS_YES3"></message> </else>
          </if>
          <if expr="False">
            <then> <message name="IDS_NO4"></message> </then>
            <else> </else>
          </if>
          <if expr="False">
            <then> </then>
            <else> <message name="IDS_YES4"></message> </else>
          </if>
          <if expr="False">
            <then> </then>
            <else> </else>
          </if>
        </messages>''')
    # Only IDS_YES* survive: <then> when True, <else> when False.
    included = [msg.attrs['name'] for msg in grd.ActiveDescendants()
                                  if msg.name == 'message']
    self.assertEqual(['IDS_YES1', 'IDS_YES2', 'IDS_YES3', 'IDS_YES4'], included)
  def testIffynessWithOutputNodes(self):
    """<if> nodes inside <outputs> include or exclude <output> files
    according to language and defines; unconditional outputs always appear."""
    grd = grd_reader.Parse(StringIO.StringIO('''
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <outputs>
          <output filename="uncond1.rc" type="rc_data" />
          <if expr="lang == 'fr' or 'hello' in defs">
            <output filename="only_fr.adm" type="adm" />
            <output filename="only_fr.plist" type="plist" />
          </if>
          <if expr="lang == 'ru'">
            <output filename="doc.html" type="document" />
          </if>
          <output filename="uncond2.adm" type="adm" />
          <output filename="iftest.h" type="rc_header">
            <emit emit_type='prepend'></emit>
          </output>
        </outputs>
      </grit>'''), dir='.')
    outputs_node = grd.children[0]
    uncond1_output = outputs_node.children[0]
    only_fr_adm_output = outputs_node.children[1].children[0]
    only_fr_plist_output = outputs_node.children[1].children[1]
    doc_output = outputs_node.children[2].children[0]
    # BUG FIX: this previously re-read children[0] (uncond1.rc), so the
    # second unconditional output was never actually checked. uncond2.adm is
    # the fourth child of <outputs> (after the two <if> nodes).
    uncond2_output = outputs_node.children[3]
    self.assertTrue(uncond1_output.name == 'output')
    self.assertTrue(only_fr_adm_output.name == 'output')
    self.assertTrue(only_fr_plist_output.name == 'output')
    self.assertTrue(doc_output.name == 'output')
    self.assertTrue(uncond2_output.name == 'output')
    # Russian + 'hello': every output is produced.
    grd.SetOutputLanguage('ru')
    grd.SetDefines({'hello': '1'})
    outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
    self.assertEquals(
        outputs,
        ['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'doc.html',
         'uncond2.adm', 'iftest.h'])
    # Russian without 'hello': the French-only outputs drop out.
    grd.SetOutputLanguage('ru')
    grd.SetDefines({'bingo': '2'})
    outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
    self.assertEquals(
        outputs,
        ['uncond1.rc', 'doc.html', 'uncond2.adm', 'iftest.h'])
    # French: French-only outputs appear, doc.html does not.
    grd.SetOutputLanguage('fr')
    grd.SetDefines({'hello': '1'})
    outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
    self.assertEquals(
        outputs,
        ['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'uncond2.adm',
         'iftest.h'])
    # English without relevant defines: only unconditional outputs remain.
    grd.SetOutputLanguage('en')
    grd.SetDefines({'bingo': '1'})
    outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
    self.assertEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])
    # French alone (no defines) still adds the French-only outputs.
    grd.SetOutputLanguage('fr')
    grd.SetDefines({'bingo': '1'})
    outputs = [output.GetFilename() for output in grd.GetOutputFiles()]
    self.assertNotEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h'])
  def testChildrenAccepted(self):
    """<if> nodes (including nested ones) are accepted inside <includes>,
    <structures>, <messages> and <translations>."""
    # The parse result is intentionally unused: successful parsing without an
    # exception is the entire assertion of this test (the previous unused
    # local 'grd' has been removed).
    grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <includes>
            <if expr="'bingo' in defs">
              <include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
            </if>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <include type="gif" name="ID_LOGO2" file="images/logo2.gif" />
              </if>
            </if>
          </includes>
          <structures>
            <if expr="'bingo' in defs">
              <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
            </if>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
              </if>
            </if>
          </structures>
          <messages>
            <if expr="'bingo' in defs">
              <message name="IDS_BINGO">Bingo!</message>
            </if>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <message name="IDS_BINGO">Bingo!</message>
              </if>
            </if>
          </messages>
        </release>
        <translations>
          <if expr="'bingo' in defs">
            <file lang="nl" path="nl_translations.xtb" />
          </if>
          <if expr="'bingo' in defs">
            <if expr="'hello' in defs">
              <file lang="nl" path="nl_translations.xtb" />
            </if>
          </if>
        </translations>
      </grit>'''), dir='.')
  def testIfBadChildrenNesting(self):
    """An <if> may only wrap children that are legal for its parent element;
    anything else raises UnexpectedChild, even when <if>s are nested."""
    # includes
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <includes>
            <if expr="'bingo' in defs">
              <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
            </if>
          </includes>
        </release>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    # messages
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <messages>
            <if expr="'bingo' in defs">
              <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
            </if>
          </messages>
        </release>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    # structures
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <structures>
            <if expr="'bingo' in defs">
              <message name="IDS_BINGO">Bingo!</message>
            </if>
          </structures>
        </release>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    # translations
    # (the comment above was previously misplaced before the structures
    # assertion; the code order is unchanged)
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <translations>
          <if expr="'bingo' in defs">
            <message name="IDS_BINGO">Bingo!</message>
          </if>
        </translations>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    # same with nesting
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <includes>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
              </if>
            </if>
          </includes>
        </release>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <messages>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <structure type="dialog" name="IDD_ABOUTBOX" file="grit\\test\data\klonk.rc" encoding="utf-16" />
              </if>
            </if>
          </messages>
        </release>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <release seq="3">
          <structures>
            <if expr="'bingo' in defs">
              <if expr="'hello' in defs">
                <message name="IDS_BINGO">Bingo!</message>
              </if>
            </if>
          </structures>
        </release>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
    xml = StringIO.StringIO('''<?xml version="1.0"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <translations>
          <if expr="'bingo' in defs">
            <if expr="'hello' in defs">
              <message name="IDS_BINGO">Bingo!</message>
            </if>
          </if>
        </translations>
      </grit>''')
    self.assertRaises(grit.exception.UnexpectedChild, grd_reader.Parse, xml)
class ReleaseNodeUnittest(unittest.TestCase):
  """Unit tests for <release> node attributes."""

  def testPseudoControl(self):
    """allow_pseudo="false" on a <release> disables pseudo translations for
    the nodes in that release only; other releases remain unaffected."""
    grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="1" source_lang_id="en-US" current_release="2" base_dir=".">
        <release seq="1" allow_pseudo="false">
          <messages>
            <message name="IDS_HELLO">
              Hello
            </message>
          </messages>
          <structures>
            <structure type="dialog" name="IDD_ABOUTBOX" encoding="utf-16" file="klonk.rc" />
          </structures>
        </release>
        <release seq="2">
          <messages>
            <message name="IDS_BINGO">
              Bingo
            </message>
          </messages>
          <structures>
            <structure type="menu" name="IDC_KLONKMENU" encoding="utf-16" file="klonk.rc" />
          </structures>
        </release>
      </grit>'''), util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    # hello/aboutbox come from release seq="1" (allow_pseudo="false");
    # bingo/menu come from release seq="2" (pseudo allowed by default).
    hello = grd.GetNodeById('IDS_HELLO')
    aboutbox = grd.GetNodeById('IDD_ABOUTBOX')
    bingo = grd.GetNodeById('IDS_BINGO')
    menu = grd.GetNodeById('IDC_KLONKMENU')
    for node in [hello, aboutbox]:
      self.failUnless(not node.PseudoIsAllowed())
    for node in [bingo, menu]:
      self.failUnless(node.PseudoIsAllowed())
    # TODO(benrg): There was a test here that formatting hello and aboutbox with
    # a pseudo language should fail, but they do not fail and the test was
    # broken and failed to catch it. Fix this.
    # Should not raise an exception since pseudo is allowed
    rc.FormatMessage(bingo, 'xyz-pseudo')
    rc.FormatStructure(menu, 'xyz-pseudo', '.')
if __name__ == '__main__':
  # Allow running this test module directly from the command line.
  unittest.main()
 | 
	bsd-2-clause | -8,368,932,426,749,408,000 | 36.224344 | 113 | 0.563634 | false | 
| 
	illicitonion/givabit | 
	lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_2/tests/regressiontests/m2m_regress/tests.py | 
	39 | 
	3145 | 
	from django.core.exceptions import FieldError
from django.test import TestCase
from models import (SelfRefer, Tag, TagCollection, Entry, SelfReferChild,
    SelfReferChildSibling, Worksheet)
class M2MRegressionTests(TestCase):
    """Regression tests for ManyToManyField edge cases (#11311, #11956, ...)."""

    def assertRaisesErrorWithMessage(self, error, message, callable, *args, **kwargs):
        """Assert that callable(*args, **kwargs) raises `error` whose str()
        equals `message`.

        The callable is invoked exactly once. The previous implementation
        invoked it twice -- once via assertRaises and again in a try block --
        which duplicated any side effects of the call.
        """
        try:
            callable(*args, **kwargs)
        except error, e:
            self.assertEqual(message, str(e))
        else:
            self.fail('%s not raised' % error.__name__)

    def test_multiple_m2m(self):
        # Multiple m2m references to the same model must be distinguished when
        # accessing the relations through an instance attribute.
        s1 = SelfRefer.objects.create(name='s1')
        s2 = SelfRefer.objects.create(name='s2')
        s3 = SelfRefer.objects.create(name='s3')
        s1.references.add(s2)
        s1.related.add(s3)
        e1 = Entry.objects.create(name='e1')
        t1 = Tag.objects.create(name='t1')
        t2 = Tag.objects.create(name='t2')
        e1.topics.add(t1)
        e1.related.add(t2)
        self.assertQuerysetEqual(s1.references.all(), ["<SelfRefer: s2>"])
        self.assertQuerysetEqual(s1.related.all(), ["<SelfRefer: s3>"])
        self.assertQuerysetEqual(e1.topics.all(), ["<Tag: t1>"])
        self.assertQuerysetEqual(e1.related.all(), ["<Tag: t2>"])

    def test_internal_related_name_not_in_error_msg(self):
        # The secret internal related names for self-referential many-to-many
        # fields shouldn't appear in the list when an error is made.
        self.assertRaisesErrorWithMessage(FieldError,
            "Cannot resolve keyword 'porcupine' into field. Choices are: id, name, references, related, selfreferchild, selfreferchildsibling",
            lambda: SelfRefer.objects.filter(porcupine='fred')
        )

    def test_m2m_inheritance_symmetry(self):
        # The relationship between two inherited models with a
        # self-referential m2m field must maintain symmetry.
        sr_child = SelfReferChild(name="Hanna")
        sr_child.save()
        sr_sibling = SelfReferChildSibling(name="Beth")
        sr_sibling.save()
        sr_child.related.add(sr_sibling)
        self.assertQuerysetEqual(sr_child.related.all(), ["<SelfRefer: Beth>"])
        self.assertQuerysetEqual(sr_sibling.related.all(), ["<SelfRefer: Hanna>"])

    def test_m2m_pk_field_type(self):
        # Regression for #11311 - The primary key for models in a m2m relation
        # doesn't have to be an AutoField
        w = Worksheet(id='abc')
        w.save()
        w.delete()

    def test_add_m2m_with_base_class(self):
        # Regression for #11956 -- You can add an object to a m2m with the
        # base class without causing integrity errors
        t1 = Tag.objects.create(name='t1')
        t2 = Tag.objects.create(name='t2')
        c1 = TagCollection.objects.create(name='c1')
        c1.tags = [t1,t2]
        c1 = TagCollection.objects.get(name='c1')
        self.assertQuerysetEqual(c1.tags.all(), ["<Tag: t1>", "<Tag: t2>"])
        self.assertQuerysetEqual(t1.tag_collections.all(), ["<TagCollection: c1>"])
 | 
	apache-2.0 | -9,105,192,795,230,684,000 | 37.353659 | 143 | 0.646423 | false | 
| 
	Stanford-Online/edx-platform | 
	lms/djangoapps/courseware/tests/test_middleware.py | 
	19 | 
	1491 | 
	"""
Tests for courseware middleware
"""
from django.http import Http404
from django.test.client import RequestFactory
from nose.plugins.attrib import attr
from lms.djangoapps.courseware.exceptions import Redirect
from lms.djangoapps.courseware.middleware import RedirectMiddleware
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr(shard=1)
class CoursewareMiddlewareTestCase(SharedModuleStoreTestCase):
    """Tests that courseware RedirectMiddleware handles exceptions correctly."""
    @classmethod
    def setUpClass(cls):
        super(CoursewareMiddlewareTestCase, cls).setUpClass()
        cls.course = CourseFactory.create()

    def test_process_404(self):
        """A 404 should not trigger anything"""
        request = RequestFactory().get("dummy_url")
        response = RedirectMiddleware().process_exception(
            request, Http404()
        )
        self.assertIsNone(response)

    def test_redirect_exceptions(self):
        """
        Unit tests for handling of Redirect exceptions.
        """
        request = RequestFactory().get("dummy_url")
        test_url = '/test_url'
        exception = Redirect(test_url)
        response = RedirectMiddleware().process_exception(
            request, exception
        )
        self.assertEqual(response.status_code, 302)
        # Read the Location header through the public HttpResponse item
        # interface instead of the private _headers attribute, which is an
        # implementation detail that changes across Django versions.
        target_url = response['Location']
        self.assertTrue(target_url.endswith(test_url))
 | 
	agpl-3.0 | -1,999,649,033,062,407,700 | 32.886364 | 76 | 0.701543 | false | 
| 
	ecino/compassion-modules | 
	partner_communication/models/email.py | 
	4 | 
	2485 | 
	# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
#    Releasing children from poverty in Jesus' name
#    @author: Emanuel Cino <[email protected]>
#
#    The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, models, fields
class Email(models.Model):
    """ Add relation to communication configuration to track generated
    e-mails.
    """
    _inherit = 'mail.mail'

    ##########################################################################
    #                                 FIELDS                                 #
    ##########################################################################
    # Communication configuration that generated this e-mail (if any).
    communication_config_id = fields.Many2one('partner.communication.config')

    @api.multi
    def send(self, auto_commit=False, raise_exception=False):
        """ Create communication for partner, if not already existing.

        For every outgoing e-mail that was not itself produced by a
        communication job, a 'done' partner.communication.job record is
        created for each recipient that is a pure contact (no internal user).
        """
        comm_obj = self.env['partner.communication.job'].with_context(
            {}).with_context(no_print=True)
        config = self.env.ref(
            'partner_communication.default_communication')
        for email in self.exists().filtered(
                lambda e: e.mail_message_id.model !=
                'partner.communication.job'):
            communication = comm_obj.search([('email_id', '=', email.id)])
            if not communication:
                # Keep only partners with no users, or whose users are all
                # "share" (portal/external) users. all() replaces the former
                # reduce(lambda u1, u2: u1 and u2, ...) fold: equivalent for
                # truth testing and independent of the Python 2 reduce()
                # builtin.
                for partner in email.recipient_ids.filtered(
                        lambda p: not p.user_ids or all(
                            p.user_ids.mapped('share'))):
                    comm_obj.create({
                        'config_id': config.id,
                        'partner_id': partner.id,
                        'user_id': email.author_id.user_ids.id,
                        'object_ids': email.recipient_ids.ids,
                        'state': 'done',
                        'auto_send': False,
                        'email_id': email.id,
                        'sent_date': fields.Datetime.now(),
                        'body_html': email.body_html,
                        'subject': email.subject,
                        'ir_attachment_ids': [(6, 0, email.attachment_ids.ids)]
                    })
        return super(Email, self).send(auto_commit, raise_exception)
 | 
	agpl-3.0 | 5,994,771,333,309,306,000 | 44.181818 | 79 | 0.444668 | false | 
| 
	lscheinkman/nupic | 
	src/nupic/data/dict_utils.py | 
	49 | 
	5295 | 
	# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
# TODO: Note the functions 'rUpdate' are duplicated in
# the swarming.hypersearch.utils.py module
class DictObj(dict):
  """Dictionary that allows attribute-like access to its elements.
  Attributes are read-only (use item assignment to mutate)."""

  def __getattr__(self, name):
    # Only called after normal attribute lookup fails, so real dict
    # attributes/methods take precedence over stored keys.
    if name == '__deepcopy__':
      return super(DictObj, self).__getattribute__("__deepcopy__")
    try:
      return self[name]
    except KeyError:
      # Raise AttributeError (not KeyError) so protocols that probe for
      # optional attributes -- getattr() with a default, hasattr(),
      # copy/pickle -- behave correctly instead of crashing on KeyError.
      raise AttributeError(name)

  def __setstate__(self, state):
    """Restore contents from a pickled state dict."""
    for k, v in state.items():
      self[k] = v
def rUpdate(original, updates):
  """Recursively updates the values in original with the values from updates.

  original: dict updated in place.
  updates:  dict whose values overwrite original's; nested dicts present in
            both arguments are merged rather than replaced wholesale.
  """
  # Keep a list of the sub-dictionaries that need to be updated to avoid having
  # to use recursion (which could fail for dictionaries with a lot of nesting).
  dictPairs = [(original, updates)]
  while dictPairs:
    original, updates = dictPairs.pop()
    # .items() instead of the Python-2-only .iteritems(): identical behavior
    # on Python 2, and keeps the module importable under Python 3.
    for k, v in updates.items():
      if k in original and isinstance(original[k], dict) and isinstance(v, dict):
        dictPairs.append((original[k], v))
      else:
        original[k] = v
def rApply(d, f):
  """Recursively applies f to the values in dict d.

  Args:
    d: The dict to recurse over.
    f: A function to apply to values in d that takes the value and a list of
        keys from the root of the dict to the value.

  Traversal is iterative (no recursion, so deep nesting cannot overflow the
  stack) and breadth-first: nested dicts are inserted at the front of the
  work list while items are popped from the back, giving FIFO order.
  """
  remainingDicts = [(d, ())]
  while remainingDicts:
    current, prevKeys = remainingDicts.pop()
    # .items() instead of the Python-2-only .iteritems() for py2/py3 compat.
    for k, v in current.items():
      keys = prevKeys + (k,)
      if isinstance(v, dict):
        remainingDicts.insert(0, (v, keys))
      else:
        f(v, keys)
def find(d, target):
  """Search d (and any nested dicts) for a key equal to ``target``.

  Returns the first matching value in breadth-first order (front-insert plus
  back-pop makes the work list a FIFO queue), or None if no match exists.
  """
  remainingDicts = [d]
  while remainingDicts:
    current = remainingDicts.pop()
    # .items() instead of the Python-2-only .iteritems() for py2/py3 compat.
    for k, v in current.items():
      if k == target:
        return v
      if isinstance(v, dict):
        remainingDicts.insert(0, v)
  return None
def get(d, keys):
  """Walk nested dict d along the key path ``keys`` and return the value.

  An empty key path returns d itself.  Raises KeyError if any step is
  missing; shadows the builtin ``get``-free namespace only locally.
  """
  node = d
  for key in keys:
    node = node[key]
  return node
def set(d, keys, value):
  """Walk nested dict d along ``keys`` and assign ``value`` at the last key.

  NOTE: keeps its historical name even though it shadows the builtin ``set``
  for the remainder of this module.
  """
  node = d
  for key in keys[:-1]:
    node = node[key]
  node[keys[-1]] = value
def dictDiffAndReport(da, db):
  """ Compares two python dictionaries at the top level and report differences,
  if any, to stdout

  da:             first dictionary
  db:             second dictionary

  Returns:        The same value as returned by dictDiff() for the given args
  """
  differences = dictDiff(da, db)

  if not differences:
    return differences

  # print() calls (instead of Python-2 print statements) produce identical
  # output on Python 2 while keeping the module importable under Python 3.
  if differences['inAButNotInB']:
    print(">>> inAButNotInB: %s" % differences['inAButNotInB'])

  if differences['inBButNotInA']:
    print(">>> inBButNotInA: %s" % differences['inBButNotInA'])

  for key in differences['differentValues']:
    print(">>> da[%s] != db[%s]" % (key, key))
    print("da[%s] = %r" % (key, da[key]))
    print("db[%s] = %r" % (key, db[key]))

  return differences
def dictDiff(da, db):
  """ Compares two python dictionaries at the top level and return differences

  da:             first dictionary
  db:             second dictionary

  Returns:        None if dictionaries test equal; otherwise returns a
                  dictionary as follows:
                  {
                    'inAButNotInB':
                        <sequence of keys that are in da but not in db>
                    'inBButNotInA':
                        <sequence of keys that are in db but not in da>
                    'differentValues':
                        <sequence of keys whose corresponding values differ
                         between da and db>
                  }
  """
  different = False

  resultDict = dict()

  # Set comprehensions are used instead of set(...) because the builtin
  # ``set`` is shadowed by this module's set() helper defined above;
  # calling set(da) would invoke that helper with the wrong arity.
  keysA = {key for key in da}
  keysB = {key for key in db}

  resultDict['inAButNotInB'] = keysA - keysB
  if resultDict['inAButNotInB']:
    different = True

  resultDict['inBButNotInA'] = keysB - keysA
  if resultDict['inBButNotInA']:
    different = True

  resultDict['differentValues'] = []
  for key in (keysA - resultDict['inAButNotInB']):
    comparisonResult = da[key] == db[key]
    if isinstance(comparisonResult, bool):
      isEqual = comparisonResult
    else:
      # This handles numpy arrays (but only at the top level)
      isEqual = comparisonResult.all()
    if not isEqual:
      resultDict['differentValues'].append(key)
      different = True

  # Sanity check: 'different' must agree with the collected result sets.
  assert (((resultDict['inAButNotInB'] or resultDict['inBButNotInA'] or
          resultDict['differentValues']) and different) or not different)

  return resultDict if different else None
 | 
	agpl-3.0 | -6,650,219,823,586,886,000 | 29.606936 | 81 | 0.62474 | false | 
| 
	GIC-de/ncclient | 
	test/unit/devices/test_junos.py | 
	2 | 
	2800 | 
	import unittest
from ncclient.devices.junos import *
import ncclient.transport
from mock import patch
import paramiko
import sys
# XSLT stylesheet that strips XML namespaces from elements and attributes
# while preserving comments and processing instructions.  The test below
# (test_transform_reply) asserts that the handler's transform_reply()
# returns exactly this text (UTF-8 encoded on Python 3).
xml = '''<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
        <xsl:output method="xml" indent="no"/>
        <xsl:template match="/|comment()|processing-instruction()">
            <xsl:copy>
                <xsl:apply-templates/>
            </xsl:copy>
        </xsl:template>
        <xsl:template match="*">
            <xsl:element name="{local-name()}">
                <xsl:apply-templates select="@*|node()"/>
            </xsl:element>
        </xsl:template>
        <xsl:template match="@*">
            <xsl:attribute name="{local-name()}">
                <xsl:value-of select="."/>
            </xsl:attribute>
        </xsl:template>
        </xsl:stylesheet>
        '''
# Sample Junos <rpc-reply> fixtures.  NOTE(review): <routing-engine> is
# never closed, so these are not well-formed XML — presumably intentional
# malformed-reply test input; confirm against the consuming tests.
xml2 = """<rpc-reply xmlns:junos="http://xml.juniper.net/junos/12.1X46/junos">
<routing-engine>
<name>reX</name>
<commit-success/>
<ok/>
</rpc-reply>"""
# Variant of xml2 with an extra empty <routing-engine/> element.
xml3 = """<rpc-reply xmlns:junos="http://xml.juniper.net/junos/12.1X46/junos">
<routing-engine>
<name>reX</name>
<commit-success/>
<routing-engine/>
<ok/>
</rpc-reply>"""
class TestJunosDevice(unittest.TestCase):
    """Unit tests for ncclient's Junos-specific device handler."""
    def setUp(self):
        # Fresh handler per test; the dict mimics the device params mapping.
        self.obj = JunosDeviceHandler({'name': 'junos'})
    # mock.patch decorators are applied bottom-up, so the mock arguments
    # arrive in the order: open_channel, Transport.__init__, exec_command.
    @patch('paramiko.Channel.exec_command')
    @patch('paramiko.Transport.__init__')
    @patch('paramiko.Transport.open_channel')
    def test_handle_connection_exceptions(
            self, mock_open, mock_init, mock_channel):
        session = ncclient.transport.SSHSession(self.obj)
        session._channel_id = 100
        mock_init.return_value = None
        session._transport = paramiko.Transport()
        channel = paramiko.Channel(100)
        mock_open.return_value = channel
        self.obj.handle_connection_exceptions(session)
        # Expected: the channel is named after the session's channel id and
        # the Junos netconf CLI command is exec'd over it.
        self.assertEqual(channel._name, "netconf-command-100")
        self.assertEqual(
            mock_channel.call_args_list[0][0][0],
            "xml-mode netconf need-trailer")
    def test_additional_operations(self):
        # NOTE: 'dict' shadows the builtin inside this test; left unchanged.
        dict = {}
        dict["rpc"] = ExecuteRpc
        dict["get_configuration"] = GetConfiguration
        dict["load_configuration"] = LoadConfiguration
        dict["compare_configuration"] = CompareConfiguration
        dict["command"] = Command
        dict["reboot"] = Reboot
        dict["halt"] = Halt
        dict["commit"] = Commit
        self.assertEqual(dict, self.obj.add_additional_operations())
    def test_transform_reply(self):
        # On Python 3 the stylesheet is compared as UTF-8 bytes.
        if sys.version >= '3':
            reply = xml.encode('utf-8')
        else:
            reply = xml
        self.assertEqual(self.obj.transform_reply(), reply)
    def test_perform_quality_check(self):
        self.assertFalse(self.obj.perform_qualify_check())
 | 
	apache-2.0 | 7,348,896,880,066,941,000 | 30.818182 | 88 | 0.604286 | false | 
| 
	mfherbst/spack | 
	var/spack/repos/builtin/packages/sw4lite/package.py | 
	2 | 
	3776 | 
	##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
class Sw4lite(MakefilePackage):
    """Sw4lite is a bare bone version of SW4 intended for testing
    performance optimizations in a few important numerical kernels of SW4."""
    tags = ['proxy-app', 'ecp-proxy-app']
    homepage = "https://geodynamics.org/cig/software/sw4"
    url      = "https://github.com/geodynamics/sw4lite/archive/v1.0.zip"
    git      = "https://github.com/geodynamics/sw4lite.git"
    version('develop', branch='master')
    version('1.0', '3d911165f4f2ff6d5f9c1bd56ab6723f')
    variant('openmp', default=True, description='Build with OpenMP support')
    variant('precision', default='double', values=('float', 'double'),
            multi=False, description='Floating point precision')
    variant('ckernel', default=False, description='C or Fortran kernel')
    depends_on('blas')
    depends_on('lapack')
    depends_on('mpi')
    # Disable parallel make for this package (Spack's `parallel` attribute).
    # NOTE(review): presumably the upstream Makefile is not parallel-safe.
    parallel = False
    @property
    def build_targets(self):
        """Assemble the make(1) variable assignments for the build,
        driven by the 'precision', 'openmp' and 'ckernel' variants."""
        targets = []
        spec = self.spec
        # Per-precision header directory selects float vs double kernels.
        if spec.variants['precision'].value == 'double':
            cxxflags = ['-I../src', '-I../src/double']
        else:
            cxxflags = ['-I../src', '-I../src/float']
        cflags = []
        fflags = []
        # OpenMP: define SW4_OPENMP and add the compiler's OpenMP flag to
        # the C, C++ and Fortran flag sets.
        if '+openmp' in self.spec:
            cflags.append('-DSW4_OPENMP')
            cflags.append(self.compiler.openmp_flag)
            cxxflags.append('-DSW4_OPENMP')
            cxxflags.append(self.compiler.openmp_flag)
            fflags.append(self.compiler.openmp_flag)
        # C kernels instead of the default Fortran ones.
        if spec.variants['ckernel'].value is True:
            cxxflags.append('-DSW4_CROUTINES')
            targets.append('ckernel=yes')
        targets.append('FC=' + spec['mpi'].mpifc)
        targets.append('CXX=' + spec['mpi'].mpicxx)
        targets.append('CFLAGS={0}'.format(' '.join(cflags)))
        targets.append('CXXFLAGS={0}'.format(' '.join(cxxflags)))
        targets.append('FFLAGS={0}'.format(' '.join(fflags)))
        targets.append('EXTRA_CXX_FLAGS=')
        targets.append('EXTRA_FORT_FLAGS=')
        lapack_blas = spec['lapack'].libs + spec['blas'].libs
        # GCC builds link the Fortran runtime explicitly.
        if spec.satisfies('%gcc'):
            targets.append('EXTRA_LINK_FLAGS={0} -lgfortran'
                           .format(lapack_blas.ld_flags))
        else:
            targets.append('EXTRA_LINK_FLAGS={0}'.format(lapack_blas.ld_flags))
        return targets
    def install(self, spec, prefix):
        """Copy the built executable and the bundled test inputs into the
        install prefix (the Makefile has no install target)."""
        mkdir(prefix.bin)
        # The executable lands in a build-config-named subdirectory.
        exe_name = glob.glob('*/sw4lite')[0]
        install(exe_name, prefix.bin)
        install_tree('tests', prefix.tests)
 | 
	lgpl-2.1 | -5,484,572,795,504,979,000 | 37.530612 | 79 | 0.620233 | false | 
| 
	servo-automation/highfive | 
	tests/api_provider_tests.py | 
	2 | 
	7671 | 
	from highfive.runner import Configuration, Response
from highfive.api_provider.interface import APIProvider, CONTRIBUTORS_STORE_KEY, DEFAULTS
from handler_tests import TestStore
from datetime import datetime
from dateutil.parser import parse as datetime_parse
from unittest import TestCase
def create_config():
    """Return a bare-bones Configuration suitable for these tests:
    named 'test_app', with no Imgur client id."""
    cfg = Configuration()
    cfg.name = 'test_app'
    cfg.imgur_client_id = None
    return cfg
class APIProviderTests(TestCase):
    """Tests for the base APIProvider interface: payload parsing,
    Imgur image uploads, and the contributors cache."""
    def test_api_init(self):
        '''The default interface will only initialize the app name and payload.'''
        config = Configuration()
        config.name = 'test_app'
        api = APIProvider(config=config, payload={})
        self.assertEqual(api.name, 'test_app')
        self.assertEqual(api.payload, {})
        self.assertEqual(api.config, config)
        # Every attribute listed in DEFAULTS stays None for an empty payload.
        for attr in DEFAULTS:
            self.assertTrue(getattr(api, attr) is None)
    def test_api_issue_payload(self):
        '''
        If the payload is related to an issue (or an issue comment in an issue/PR),
        then this should've initialized the commonly used issue-related stuff.
        '''
        payload = {
            'issue': {
                'user': {
                    'login': 'Foobar'
                },
                'state': 'open',
                'labels': [
                    { 'name': 'Foo' },
                    { 'name': 'Bar' }
                ],
                'number': 200,
                'updated_at': '1970-01-01T00:00:00Z'
            },
        }
        api = APIProvider(config=create_config(), payload=payload)
        self.assertEqual(api.payload, payload)
        self.assertFalse(api.is_pull)
        self.assertTrue(api.is_open)
        # Logins/labels are expected lowercased, numbers stringified.
        self.assertEqual(api.creator, 'foobar')
        self.assertEqual(api.last_updated, payload['issue']['updated_at'])
        self.assertEqual(api.number, '200')
        self.assertTrue(api.pull_url is None)
        self.assertEqual(api.labels, ['foo', 'bar'])
    def test_api_pr_payload(self):
        '''
        If the payload is related to a PR, then the commonly used PR attributes
        should've been initialized.
        '''
        payload = {
            'pull_request': {
                'user': {
                    'login': 'Foobar'
                },
                'assignee': {
                    'login': 'Baz'
                },
                'state': 'open',
                'number': 50,
                'url': 'some url',
                'updated_at': '1970-01-01T00:00:00Z'
            }
        }
        api = APIProvider(config=create_config(), payload=payload)
        self.assertEqual(api.payload, payload)
        self.assertTrue(api.is_open)
        self.assertTrue(api.is_pull)
        self.assertEqual(api.creator, 'foobar')
        self.assertEqual(api.assignee, 'baz')
        self.assertEqual(api.last_updated, payload['pull_request']['updated_at'])
        self.assertEqual(api.number, '50')
        self.assertEqual(api.pull_url, 'some url')
    def test_api_other_events(self):
        '''Test for payload belonging to other events such as comment, label, etc.'''
        payload = {         # This is a hypothetical payload just for tests
            'sender': {
                'login': 'Someone'
            },
            'label': {
                'name': 'Label'
            },
            'repository': {
                'owner': {
                    'login': 'foo'
                },
                'name': 'bar'
            },
            'comment': {
                'body': 'Hello, world!',
            },
            'issue': {
                'pull_request': {},
                'labels': [],
                'user': {
                    'login': 'Foobar'
                },
                'state': 'open',
                'number': 200,
            }
        }
        api = APIProvider(config=create_config(), payload=payload)
        # An 'issue' carrying a 'pull_request' key is treated as a PR.
        self.assertTrue(api.is_pull)
        self.assertEqual(api.sender, 'someone')
        self.assertEqual(api.comment, 'Hello, world!')
        self.assertEqual(api.current_label, 'label')
        self.assertEqual(api.owner, 'foo')
        self.assertEqual(api.repo, 'bar')
    def test_api_imgur_upload(self):
        '''Test Imgur API upload'''
        config = create_config()
        api = APIProvider(config=config, payload={})
        resp = api.post_image_to_imgur('some data')
        self.assertTrue(resp is None)       # No client ID - returns None
        config.imgur_client_id = 'foobar'
        # Stub that validates the outgoing request and fakes a success reply.
        def test_valid_request(method, url, data, headers):
            self.assertEqual(headers['Authorization'], 'Client-ID foobar')
            self.assertEqual(method, 'POST')
            self.assertEqual(url, 'https://api.imgur.com/3/image')
            self.assertEqual(data, {'image': 'some data'})
            return Response(data={'data': {'link': 'hello'}})
        # (request stub, expected result) pairs: success, HTTP 400 error,
        # and a malformed response body.
        tests = [
            (test_valid_request, 'hello'),
            (lambda method, url, data, headers: Response(data='', code=400), None),
            (lambda method, url, data, headers: Response(data=''), None)
        ]
        for func, expected in tests:
            resp = api.post_image_to_imgur('some data', json_request=func)
            self.assertEqual(resp, expected)
    def test_contributors_update(self):
        '''
        Contributors list (cache) live only for an hour (by default). Once it's outdated,
        the next call to `get_contributors` calls `fetch_contributors`, writes it to the store
        and returns the list. Any calls within the next hour will return the existing contributors
        without calling the API.
        '''
        # Subclass that records whether the remote fetch was performed.
        class TestAPI(APIProvider):
            fetched = False
            def fetch_contributors(self):
                self.fetched = True
                return []
        config = create_config()
        api = TestAPI(config=config, payload={}, store=None)
        self.assertFalse(api.fetched)
        api.get_contributors()
        # No store. This will always call the API.
        self.assertTrue(api.fetched)
        store = TestStore()
        api = TestAPI(config=config, payload={}, store=store)
        self.assertFalse(api.fetched)
        now = datetime.now()
        api.get_contributors()
        data = store.get_object(CONTRIBUTORS_STORE_KEY)
        updated_time = datetime_parse(data['last_update_time'])
        # Store doesn't have contributors. It's been updated for the first time.
        self.assertTrue(updated_time >= now)
        self.assertTrue(api.fetched)
        store = TestStore()
        store.write_object(CONTRIBUTORS_STORE_KEY,
                           { 'last_update_time': str(now), 'list': ['booya'] })
        api = TestAPI(config=config, payload={}, store=store)
        self.assertFalse(api.fetched)
        api.get_contributors()
        data = store.get_object(CONTRIBUTORS_STORE_KEY)
        updated_time = datetime_parse(data['last_update_time'])
        # Called within a cycle - no fetch occurs.
        self.assertEqual(updated_time, now)
        self.assertFalse(api.fetched)
        store = TestStore()
        store.write_object(CONTRIBUTORS_STORE_KEY,
                           { 'last_update_time': str(now), 'list': ['booya'] })
        api = TestAPI(config=config, payload={}, store=store)
        self.assertFalse(api.fetched)
        api.get_contributors(fetch=True)
        # When `fetch` is enabled, API is called regardless.
        self.assertTrue(api.fetched)
        data = store.get_object(CONTRIBUTORS_STORE_KEY)
        updated_time = datetime_parse(data['last_update_time'])
        self.assertTrue(updated_time > now)
 | 
	mpl-2.0 | 4,291,612,290,385,657,300 | 35.014085 | 98 | 0.551688 | false | 
| 
	djeraseit/PredictionIO | 
	examples/experimental/scala-local-friend-recommendation/file_random.py | 
	48 | 
	4883 | 
	import sys
import random
read_file = open("data/user_profile.txt", 'r')
write_file = open("data/mini_user_profile.txt", 'w')
number_of_lines = int(sys.argv[1])
number_of_items = int(sys.argv[2])
#record number of lines
count = 0
random_num_list = []
# loop through the file to get number of lines in the file
for line in read_file:
    count += 1
print "generating random numbers"
# generating a list of random lines to read from
for i in range(0, number_of_lines):
    random_num_list.append(random.randint(0, count))
#get rid of any duplicates
no_duplicate_list = list(set(random_num_list))
#sort the list
no_duplicate_list.sort()
#print no_duplicate_list
#go to file begining
read_file.seek(0)
count = 0
index = 0
user_id_list = []
print "getting lines from user_profile"
for line in read_file:
    if count == no_duplicate_list[index]:
        write_file.write(line)
        index += 1
        user_id_list.append(int(line.split()[0]))
        if index == len(no_duplicate_list):
            break
    count += 1
#user_id_list is sorted
user_id_list = map(str, user_id_list)
user_id_list.sort()
#print user_id_list
print "user_id finished"
print "getting lines from item"
read_file = open("data/item.txt", 'r')
write_file = open("data/mini_item.txt", 'w')
count = 0
random_num_list = []
for line in read_file:
    count += 1
for i in range(0, number_of_items):
    random_num_list.append(random.randint(0, count))
#no duplicate
random_num_list = list(set(random_num_list))
random_num_list.sort()
read_file.seek(0)
count = 0
index = 0
item_id_list = []
for line in read_file:
    if count == random_num_list[index]:
        write_file.write(line)
        index += 1
        item_id_list.append(int(line.split()[0]))
        if index == len(random_num_list):
            break
    count += 1
print "item finished"
print "getting mini user_key_word"
read_file = open("data/user_key_word.txt", 'r')
write_file = open("data/mini_user_key_word.txt", 'w')
#record number of lines
count = 0
index = 0
# loop through the file to get number of lines in the file
for line in read_file:
    if line.split()[0] == user_id_list[index]:
        write_file.write(line)
        index += 1
        if index == len(user_id_list):
            #print "break"
            break
print "user keyword finished"
#go to file begining
#getting the user_sns_small
print "getting user sns"
#print user_id_list
read_file = open("data/user_sns.txt", 'r')
#write_file = open("data/mini_user_sns_small.txt", 'w')
user_sns_list = []
index = 0
met = False
count = 0
for line in read_file:
    count += 1
    #print count
    #Same user multiple following
    if met:
        if line.split()[0] != user_id_list[index]:
            index += 1
            met = False
            if index == len(user_id_list):
                break
    if line.split()[0] == user_id_list[index]:
        #print "here"
        user_sns_list.append(line)
        met = True
    # if the current line's user is greater than the user list, that means
    # the user doesn't follow or are following, then we move to next user
    if line.split()[0] > user_id_list[index]:
        index += 1
        if index == len(user_id_list):
            break
#print user_sns_list
write_file = open("data/mini_user_sns.txt",'w')
for line in user_sns_list:
    for user_id in user_id_list:
        if line.split()[1] == user_id:
            write_file.write(line)
            break
print "sns got"
print "getting user action"
#for line in write_file:
read_file = open("data/user_action.txt", 'r')
user_action_list = []
index = 0
met = False
count = 0
for line in read_file:
    count += 1
    #print count
    if met:
        if line.split()[0] != user_id_list[index]:
            index += 1
            met = False
            if index == len(user_id_list):
                break
    if line.split()[0] == user_id_list[index]:
        #print "here"
        user_action_list.append(line)
        met = True
    if line.split()[0] > user_id_list[index]:
        index += 1
        if index == len(user_id_list):
            break
#print user_action_list
write_file = open("data/mini_user_action.txt",'w')
for line in user_action_list:
    for user_id in user_id_list:
        if line.split()[1] == user_id:
            write_file.write(line)
            break
print "user action got"
print "getting rec_log_train"
user_set = set(user_id_list)
item_set = set(item_id_list)
read_file = open("data/rec_log_train.txt", 'r')
write_file = open("data/mini_rec_log_train.txt",'w')
count = 0
#for item in item_set:
#    print type(item)
#for user in user_set:
#    print type(user)
for line in read_file:
    words = line.split()
#    if words[0] in user_set and (words[1] in user_set or words[1] in item_set):
    if words[0] in user_set and words[1] in item_set:
        write_file.write(line)
    print count
    count += 1
print "Done"
 | 
	apache-2.0 | -7,507,922,459,721,964,000 | 24.7 | 80 | 0.618063 | false | 
| 
	Softmotions/edx-platform | 
	common/test/acceptance/pages/lms/discussion.py | 
	36 | 
	25473 | 
	from contextlib import contextmanager
from bok_choy.javascript import wait_for_js
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, Promise
from .course_page import CoursePage
class DiscussionPageMixin(object):
    """Shared helpers for discussion page objects."""

    def is_ajax_finished(self):
        """Return True once jQuery reports no AJAX requests in flight."""
        active_requests = self.browser.execute_script("return jQuery.active")
        return active_requests == 0
class DiscussionThreadPage(PageObject, DiscussionPageMixin):
    url = None
    def __init__(self, browser, thread_selector):
        super(DiscussionThreadPage, self).__init__(browser)
        self.thread_selector = thread_selector
    def _find_within(self, selector):
        """
        Returns a query corresponding to the given CSS selector within the scope
        of this thread page
        """
        return self.q(css=self.thread_selector + " " + selector)
    def is_browser_on_page(self):
        return self.q(css=self.thread_selector).present
    def _get_element_text(self, selector):
        """
        Returns the text of the first element matching the given selector, or
        None if no such element exists
        """
        text_list = self._find_within(selector).text
        return text_list[0] if text_list else None
    def _is_element_visible(self, selector):
        query = self._find_within(selector)
        return query.present and query.visible
    @contextmanager
    def _secondary_action_menu_open(self, ancestor_selector):
        """
        Given the selector for an ancestor of a secondary menu, return a context
        manager that will open and close the menu
        """
        self._find_within(ancestor_selector + " .action-more").click()
        EmptyPromise(
            lambda: self._is_element_visible(ancestor_selector + " .actions-dropdown"),
            "Secondary action menu opened"
        ).fulfill()
        yield
        if self._is_element_visible(ancestor_selector + " .actions-dropdown"):
            self._find_within(ancestor_selector + " .action-more").click()
            EmptyPromise(
                lambda: not self._is_element_visible(ancestor_selector + " .actions-dropdown"),
                "Secondary action menu closed"
            ).fulfill()
    def get_group_visibility_label(self):
        """
        Returns the group visibility label shown for the thread.
        """
        return self._get_element_text(".group-visibility-label")
    def get_response_total_text(self):
        """Returns the response count text, or None if not present"""
        return self._get_element_text(".response-count")
    def get_num_displayed_responses(self):
        """Returns the number of responses actually rendered"""
        return len(self._find_within(".discussion-response"))
    def get_shown_responses_text(self):
        """Returns the shown response count text, or None if not present"""
        return self._get_element_text(".response-display-count")
    def get_load_responses_button_text(self):
        """Returns the load more responses button text, or None if not present"""
        return self._get_element_text(".load-response-button")
    def load_more_responses(self):
        """Clicks the load more responses button and waits for responses to load"""
        self._find_within(".load-response-button").click()
        EmptyPromise(
            self.is_ajax_finished,
            "Loading more Responses"
        ).fulfill()
    def has_add_response_button(self):
        """Returns true if the add response button is visible, false otherwise"""
        return self._is_element_visible(".add-response-btn")
    def click_add_response_button(self):
        """
        Clicks the add response button and ensures that the response text
        field receives focus
        """
        self._find_within(".add-response-btn").first.click()
        EmptyPromise(
            lambda: self._find_within(".discussion-reply-new textarea:focus").present,
            "Response field received focus"
        ).fulfill()
    @wait_for_js
    def is_response_editor_visible(self, response_id):
        """Returns true if the response editor is present, false otherwise"""
        return self._is_element_visible(".response_{} .edit-post-body".format(response_id))
    @wait_for_js
    def is_discussion_body_visible(self):
        return self._is_element_visible(".post-body")
    def is_mathjax_preview_available(self):
        return self.q(css=".MathJax_Preview").text[0] == ""
    def is_mathjax_rendered(self):
        return self._is_element_visible(".MathJax")
    def is_response_visible(self, comment_id):
        """Returns true if the response is viewable onscreen"""
        return self._is_element_visible(".response_{} .response-body".format(comment_id))
    def is_response_editable(self, response_id):
        """Returns true if the edit response button is present, false otherwise"""
        with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            return self._is_element_visible(".response_{} .discussion-response .action-edit".format(response_id))
    def get_response_body(self, response_id):
        return self._get_element_text(".response_{} .response-body".format(response_id))
    def start_response_edit(self, response_id):
        """Click the edit button for the response, loading the editing view"""
        with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            self._find_within(".response_{} .discussion-response .action-edit".format(response_id)).first.click()
            EmptyPromise(
                lambda: self.is_response_editor_visible(response_id),
                "Response edit started"
            ).fulfill()
    def get_link_href(self):
        """Extracts href attribute of the referenced link"""
        link_href = self._find_within(".post-body p a").attrs('href')
        return link_href[0] if link_href else None
    def get_response_vote_count(self, response_id):
        return self._get_element_text(".response_{} .discussion-response .action-vote .vote-count".format(response_id))
    def vote_response(self, response_id):
        current_count = self._get_element_text(".response_{} .discussion-response .action-vote .vote-count".format(response_id))
        self._find_within(".response_{} .discussion-response .action-vote".format(response_id)).first.click()
        self.wait_for_ajax()
        EmptyPromise(
            lambda: current_count != self.get_response_vote_count(response_id),
            "Response is voted"
        ).fulfill()
    def is_response_reported(self, response_id):
        return self._is_element_visible(".response_{} .discussion-response .post-label-reported".format(response_id))
    def report_response(self, response_id):
        with self._secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            self._find_within(".response_{} .discussion-response .action-report".format(response_id)).first.click()
            self.wait_for_ajax()
            EmptyPromise(
                lambda: self.is_response_reported(response_id),
                "Response is reported"
            ).fulfill()
    def is_response_endorsed(self, response_id):
        return "endorsed" in self._get_element_text(".response_{} .discussion-response .posted-details".format(response_id))
    def endorse_response(self, response_id):
        self._find_within(".response_{} .discussion-response .action-endorse".format(response_id)).first.click()
        self.wait_for_ajax()
        EmptyPromise(
            lambda: self.is_response_endorsed(response_id),
            "Response edit started"
        ).fulfill()
    def set_response_editor_value(self, response_id, new_body):
        """Replace the contents of the response editor"""
        self._find_within(".response_{} .discussion-response .wmd-input".format(response_id)).fill(new_body)
    def submit_response_edit(self, response_id, new_response_body):
        """Click the submit button on the response editor and wait for the edit to land."""
        self._find_within(".response_{} .discussion-response .post-update".format(response_id)).first.click()
        EmptyPromise(
            lambda: (
                not self.is_response_editor_visible(response_id) and
                self.is_response_visible(response_id) and
                self.get_response_body(response_id) == new_response_body
            ),
            # Was "Comment edit succeeded" — this method edits a *response*.
            "Response edit succeeded"
        ).fulfill()
    def is_show_comments_visible(self, response_id):
        """Returns true if the "show comments" link is visible for a response"""
        link_css = ".response_{} .action-show-comments".format(response_id)
        return self._is_element_visible(link_css)
    def show_comments(self, response_id):
        """Click the "show comments" link for a response"""
        self._find_within(".response_{} .action-show-comments".format(response_id)).first.click()
        comments_css = ".response_{} .comments".format(response_id)
        EmptyPromise(
            lambda: self._is_element_visible(comments_css),
            "Comments shown"
        ).fulfill()
    def is_add_comment_visible(self, response_id):
        """Returns true if the "add comment" form is visible for a response"""
        form_css = "#wmd-input-comment-body-{}".format(response_id)
        return self._is_element_visible(form_css)
    def is_comment_visible(self, comment_id):
        """Returns true if the comment is viewable onscreen"""
        body_css = "#comment_{} .response-body".format(comment_id)
        return self._is_element_visible(body_css)
    def get_comment_body(self, comment_id):
        """Return the text of the comment's body element."""
        body_css = "#comment_{} .response-body".format(comment_id)
        return self._get_element_text(body_css)
    def is_comment_deletable(self, comment_id):
        """Returns true if the delete comment button is present, false otherwise"""
        root = "#comment_{}".format(comment_id)
        with self._secondary_action_menu_open(root):
            return self._is_element_visible(root + " .action-delete")
    def delete_comment(self, comment_id):
        """Delete the given comment (accepting the confirmation alert) and wait for removal."""
        root = "#comment_{}".format(comment_id)
        with self.handle_alert():
            with self._secondary_action_menu_open(root):
                self._find_within(root + " .action-delete").first.click()
        EmptyPromise(
            lambda: not self.is_comment_visible(comment_id),
            "Deleted comment was removed"
        ).fulfill()
    def is_comment_editable(self, comment_id):
        """Returns true if the edit comment button is present, false otherwise"""
        root = "#comment_{}".format(comment_id)
        with self._secondary_action_menu_open(root):
            return self._is_element_visible(root + " .action-edit")
    def is_comment_editor_visible(self, comment_id):
        """Returns true if the comment editor is present, false otherwise"""
        editor_css = ".edit-comment-body[data-id='{}']".format(comment_id)
        return self._is_element_visible(editor_css)
    def _get_comment_editor_value(self, comment_id):
        """Return the current text inside the comment editor's input field."""
        input_css = "#wmd-input-edit-comment-body-{}".format(comment_id)
        return self._find_within(input_css).text[0]
    def start_comment_edit(self, comment_id):
        """Click the edit button for the comment, loading the editing view"""
        # Capture the current body so we can verify the editor is pre-filled
        # with it once editing starts.
        old_body = self.get_comment_body(comment_id)
        with self._secondary_action_menu_open("#comment_{}".format(comment_id)):
            self._find_within("#comment_{} .action-edit".format(comment_id)).first.click()
            # NOTE(review): the wait runs inside the _secondary_action_menu_open
            # context (unlike delete_comment, where it runs after) — presumably
            # intentional; confirm before restructuring.
            EmptyPromise(
                lambda: (
                    self.is_comment_editor_visible(comment_id) and
                    not self.is_comment_visible(comment_id) and
                    self._get_comment_editor_value(comment_id) == old_body
                ),
                "Comment edit started"
            ).fulfill()
    def set_comment_editor_value(self, comment_id, new_body):
        """Replace the contents of the comment editor"""
        input_css = "#comment_{} .wmd-input".format(comment_id)
        self._find_within(input_css).fill(new_body)
    def submit_comment_edit(self, comment_id, new_comment_body):
        """Click the submit button on the comment editor"""
        self._find_within("#comment_{} .post-update".format(comment_id)).first.click()

        def _edit_applied():
            # Editor gone, comment visible again, and body updated.
            return (
                not self.is_comment_editor_visible(comment_id) and
                self.is_comment_visible(comment_id) and
                self.get_comment_body(comment_id) == new_comment_body
            )

        EmptyPromise(_edit_applied, "Comment edit succeeded").fulfill()
    def cancel_comment_edit(self, comment_id, original_body):
        """Click the cancel button on the comment editor"""
        self._find_within("#comment_{} .post-cancel".format(comment_id)).first.click()

        def _edit_canceled():
            # Editor dismissed and the original body restored unchanged.
            return (
                not self.is_comment_editor_visible(comment_id) and
                self.is_comment_visible(comment_id) and
                self.get_comment_body(comment_id) == original_body
            )

        EmptyPromise(_edit_canceled, "Comment edit was canceled").fulfill()
class DiscussionSortPreferencePage(CoursePage):
    """
    Page that contain the discussion board with sorting options
    """

    def __init__(self, browser, course_id):
        super(DiscussionSortPreferencePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum"

    def is_browser_on_page(self):
        """
        Return true if the browser is on the right page else false.
        """
        return self.q(css="body.discussion .forum-nav-sort-control").present

    def get_selected_sort_preference(self):
        """
        Return the text of option that is selected for sorting.
        """
        all_options = self.q(css="body.discussion .forum-nav-sort-control option")
        selected = all_options.filter(lambda el: el.is_selected())
        return selected[0].get_attribute("value")

    def change_sort_preference(self, sort_by):
        """
        Change the option of sorting by clicking on new option.
        """
        option_css = "body.discussion .forum-nav-sort-control option[value='{0}']".format(sort_by)
        self.q(css=option_css).click()

    def refresh_page(self):
        """
        Reload the page.
        """
        self.browser.refresh()
class DiscussionTabSingleThreadPage(CoursePage):
    """
    Discussion-tab view of a single thread.

    Wraps a DiscussionThreadPage and delegates any attribute not defined here
    to it via __getattr__ (e.g. ``_find_within`` used in close_open_thread).
    """
    def __init__(self, browser, course_id, discussion_id, thread_id):
        super(DiscussionTabSingleThreadPage, self).__init__(browser, course_id)
        self.thread_page = DiscussionThreadPage(
            browser,
            "body.discussion .discussion-article[data-id='{thread_id}']".format(thread_id=thread_id)
        )
        self.url_path = "discussion/forum/{discussion_id}/threads/{thread_id}".format(
            discussion_id=discussion_id, thread_id=thread_id
        )
    def is_browser_on_page(self):
        # Delegate page detection to the wrapped thread page.
        return self.thread_page.is_browser_on_page()
    def __getattr__(self, name):
        # Fall back to the wrapped thread page for unknown attributes.
        return getattr(self.thread_page, name)
    def close_open_thread(self):
        # Close the currently open thread via its secondary action menu.
        with self.thread_page._secondary_action_menu_open(".forum-thread-main-wrapper"):
            self._find_within(".forum-thread-main-wrapper .action-close").first.click()
    @wait_for_js
    def is_window_on_top(self):
        """
        Check if window's scroll is at top
        """
        return self.browser.execute_script("return $('html, body').offset().top") == 0
    def _thread_is_rendered_successfully(self, thread_id):
        # True once the thread's article element is visible.
        return self.q(css=".discussion-article[data-id='{}']".format(thread_id)).visible
    def click_and_open_thread(self, thread_id):
        """
        Click specific thread on the list.
        """
        thread_selector = "li[data-id='{}']".format(thread_id)
        self.q(css=thread_selector).first.click()
        EmptyPromise(
            lambda: self._thread_is_rendered_successfully(thread_id),
            "Thread has been rendered"
        ).fulfill()
    def check_threads_rendered_successfully(self, thread_count):
        """
        Count the number of threads available on page.
        Returns True iff exactly `thread_count` threads are listed.
        """
        return len(self.q(css=".forum-nav-thread").results) == thread_count
    def check_window_is_on_top(self):
        """
        Check window is on top of the page
        """
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()
class InlineDiscussionPage(PageObject):
    """
    Discussion module embedded inside a courseware unit, scoped by its
    data-discussion-id attribute.
    """
    # Inline discussions have no standalone URL; the page is reached by
    # navigating courseware.
    url = None
    def __init__(self, browser, discussion_id):
        super(InlineDiscussionPage, self).__init__(browser)
        # NOTE: the selector string deliberately ends with a trailing space;
        # _find_within / element_exists concatenate a second space, which CSS
        # tolerates. Keep the string as-is.
        self._discussion_selector = (
            ".discussion-module[data-discussion-id='{discussion_id}'] ".format(
                discussion_id=discussion_id
            )
        )
    def _find_within(self, selector):
        """
        Returns a query corresponding to the given CSS selector within the scope
        of this discussion page
        """
        return self.q(css=self._discussion_selector + " " + selector)
    def is_browser_on_page(self):
        self.wait_for_ajax()
        return self.q(css=self._discussion_selector).present
    def is_discussion_expanded(self):
        # The inner .discussion element only exists once expanded.
        return self._find_within(".discussion").present
    def expand_discussion(self):
        """Click the link to expand the discussion"""
        self._find_within(".discussion-show").first.click()
        EmptyPromise(
            self.is_discussion_expanded,
            "Discussion expanded"
        ).fulfill()
    def get_num_displayed_threads(self):
        # Number of thread elements currently rendered in this module.
        return len(self._find_within(".discussion-thread"))
    def has_thread(self, thread_id):
        """Returns true if this page is showing the thread with the specified id."""
        return self._find_within('.discussion-thread#thread_{}'.format(thread_id)).present
    def element_exists(self, selector):
        return self.q(css=self._discussion_selector + " " + selector).present
    def is_new_post_opened(self):
        return self._find_within(".new-post-article").visible
    def click_element(self, selector):
        # Wait for presence first so the click does not race the render.
        self.wait_for_element_presence(
            "{discussion} {selector}".format(discussion=self._discussion_selector, selector=selector),
            "{selector} is visible".format(selector=selector)
        )
        self._find_within(selector).click()
    def click_cancel_new_post(self):
        self.click_element(".cancel")
        EmptyPromise(
            lambda: not self.is_new_post_opened(),
            "New post closed"
        ).fulfill()
    def click_new_post_button(self):
        self.click_element(".new-post-btn")
        EmptyPromise(
            self.is_new_post_opened,
            "New post opened"
        ).fulfill()
    @wait_for_js
    def _is_element_visible(self, selector):
        # Both present (in DOM) and visible (rendered) are required.
        query = self._find_within(selector)
        return query.present and query.visible
class InlineDiscussionThreadPage(DiscussionThreadPage):
    """A single thread rendered inside a courseware inline discussion module."""

    def __init__(self, browser, thread_id):
        thread_root = "body.courseware .discussion-module #thread_{thread_id}".format(thread_id=thread_id)
        super(InlineDiscussionThreadPage, self).__init__(browser, thread_root)

    def expand(self):
        """Clicks the link to expand the thread"""
        self._find_within(".forum-thread-expand").first.click()
        EmptyPromise(
            lambda: bool(self.get_response_total_text()),
            "Thread expanded"
        ).fulfill()

    def is_thread_anonymous(self):
        """Return True when no username element appears in the posted details."""
        return not self.q(css=".posted-details > .username").present

    @wait_for_js
    def check_if_selector_is_focused(self, selector):
        """
        Check if selector is focused
        """
        script = "return $('{}').is(':focus')".format(selector)
        return self.browser.execute_script(script)
class DiscussionUserProfilePage(CoursePage):
    """
    Forum page listing the threads posted by a single user, with pagination.
    """
    TEXT_NEXT = u'Next >'
    TEXT_PREV = u'< Previous'
    PAGING_SELECTOR = "a.discussion-pagination[data-page-number]"
    def __init__(self, browser, course_id, user_id, username, page=1):
        super(DiscussionUserProfilePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum/dummy/users/{}?page={}".format(user_id, page)
        self.username = username
    def is_browser_on_page(self):
        return (
            self.q(css='section.discussion-user-threads[data-course-id="{}"]'.format(self.course_id)).present
            and
            self.q(css='section.user-profile a.learner-profile-link').present
            and
            self.q(css='section.user-profile a.learner-profile-link').text[0] == self.username
        )
    @wait_for_js
    def is_window_on_top(self):
        """Return True if the window is scrolled to the very top."""
        return self.browser.execute_script("return $('html, body').offset().top") == 0
    def get_shown_thread_ids(self):
        """Return displayed thread ids (element id minus the 'thread_' prefix)."""
        elems = self.q(css="article.discussion-thread")
        return [elem.get_attribute("id")[7:] for elem in elems]
    def get_current_page(self):
        """Return the page number currently highlighted by the paginator."""
        def check_func():
            try:
                current_page = int(self.q(css="nav.discussion-paginator li.current-page").text[0])
            except (IndexError, ValueError):
                # Was a bare `except:`, which also swallowed KeyboardInterrupt
                # and masked unrelated bugs. Only "element not rendered yet"
                # (IndexError) and "non-numeric text" (ValueError) should retry.
                return False, None
            return True, current_page
        return Promise(
            check_func, 'discussion-paginator current page has text', timeout=5,
        ).fulfill()
    def _check_pager(self, text, page_number=None):
        """
        returns True if 'text' matches the text in any of the pagination elements.  If
        page_number is provided, only return True if the element points to that result
        page.
        """
        elems = self.q(css=self.PAGING_SELECTOR).filter(lambda elem: elem.text == text)
        if page_number:
            elems = elems.filter(lambda elem: int(elem.get_attribute('data-page-number')) == page_number)
        return elems.present
    def get_clickable_pages(self):
        """Return the sorted list of page numbers offered by the paginator."""
        return sorted([
            int(elem.get_attribute('data-page-number'))
            for elem in self.q(css=self.PAGING_SELECTOR)
            if str(elem.text).isdigit()
        ])
    def is_prev_button_shown(self, page_number=None):
        return self._check_pager(self.TEXT_PREV, page_number)
    def is_next_button_shown(self, page_number=None):
        return self._check_pager(self.TEXT_NEXT, page_number)
    def _click_pager_with_text(self, text, page_number):
        """
        click the first pagination element with whose text is `text` and ensure
        the resulting page number matches `page_number`.
        """
        targets = [elem for elem in self.q(css=self.PAGING_SELECTOR) if elem.text == text]
        targets[0].click()
        EmptyPromise(
            lambda: self.get_current_page() == page_number,
            "navigated to desired page"
        ).fulfill()
    def click_prev_page(self):
        self._click_pager_with_text(self.TEXT_PREV, self.get_current_page() - 1)
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()
    def click_next_page(self):
        self._click_pager_with_text(self.TEXT_NEXT, self.get_current_page() + 1)
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()
    def click_on_page(self, page_number):
        self._click_pager_with_text(unicode(page_number), page_number)
        EmptyPromise(
            self.is_window_on_top,
            "Window is on top"
        ).fulfill()
    def click_on_sidebar_username(self):
        self.wait_for_page()
        self.q(css='.learner-profile-link').first.click()
class DiscussionTabHomePage(CoursePage, DiscussionPageMixin):
    """
    The discussion tab's landing page: search, alerts, and new-post entry.
    """
    ALERT_SELECTOR = ".discussion-body .forum-nav .search-alert"
    def __init__(self, browser, course_id):
        super(DiscussionTabHomePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum/"
    def is_browser_on_page(self):
        return self.q(css=".discussion-body section.home-header").present
    def perform_search(self, text="dummy"):
        # chr(10) appends a newline, which submits the search field.
        self.q(css=".forum-nav-search-input").fill(text + chr(10))
        EmptyPromise(
            self.is_ajax_finished,
            "waiting for server to return result"
        ).fulfill()
    def get_search_alert_messages(self):
        return self.q(css=self.ALERT_SELECTOR + " .message").text
    def get_search_alert_links(self):
        return self.q(css=self.ALERT_SELECTOR + " .link-jump")
    def dismiss_alert_message(self, text):
        """
        dismiss any search alert message containing the specified text.
        """
        def _match_messages(text):
            return self.q(css=".search-alert").filter(lambda elem: text in elem.text)
        # Dismiss every matching alert, then wait for all of them to go away.
        for alert_id in _match_messages(text).attrs("id"):
            self.q(css="{}#{} a.dismiss".format(self.ALERT_SELECTOR, alert_id)).click()
        EmptyPromise(
            lambda: _match_messages(text).results == [],
            "waiting for dismissed alerts to disappear"
        ).fulfill()
    def click_new_post_button(self):
        """
        Clicks the 'New Post' button.
        """
        self.new_post_button.click()
        EmptyPromise(
            lambda: (
                self.new_post_form
            ),
            "New post action succeeded"
        ).fulfill()
    @property
    def new_post_button(self):
        """
        Returns the new post button, or None unless exactly one visible
        button is found.
        """
        elements = self.q(css="ol.course-tabs .new-post-btn")
        return elements.first if elements.visible and len(elements) == 1 else None
    @property
    def new_post_form(self):
        """
        Returns the new post form, or None unless exactly one visible form
        is found.
        """
        elements = self.q(css=".forum-new-post-form")
        return elements[0] if elements.visible and len(elements) == 1 else None
 | 
	agpl-3.0 | 1,997,688,661,498,393,600 | 38.129032 | 128 | 0.620147 | false | 
| 
	Balannen/LSMASOMM | 
	atom3/Kernel/ColoredText/configHandler.py | 
	1 | 
	27398 | 
	"""Provides access to stored IDLE configuration information.
Refer to the comments at the beginning of config-main.def for a description of
the available configuration files and the design implemented to update user
configuration information.  In particular, user configuration choices which
duplicate the defaults will be removed from the user's configuration files,
and if a file becomes empty, it will be deleted.
The contents of the user files may be altered using the Options/Configure IDLE
menu to access the configuration GUI (configDialog.py), or manually.
Throughout this module there is an emphasis on returning useable defaults
when a problem occurs in returning a requested configuration value back to
idle. This is to allow IDLE to continue to function in spite of errors in
the retrieval of config information. When a default is returned instead of
a requested config value, a message is printed to stderr to aid in
configuration problem notification and resolution.
"""
import os
import sys
import string
from ConfigParser import ConfigParser, NoOptionError, NoSectionError
# Exception types raised by IdleConf for invalid configuration queries
# (bad config type, bad config set, bad fg/bg selector, bad theme type).
class InvalidConfigType(Exception): pass
class InvalidConfigSet(Exception): pass
class InvalidFgBg(Exception): pass
class InvalidTheme(Exception): pass
class IdleConfParser(ConfigParser):
    """
    A ConfigParser specialised for idle configuration file handling
    """
    def __init__(self, cfgFile, cfgDefaults=None):
        """
        cfgFile - string, fully specified configuration file name
        """
        self.file = cfgFile
        ConfigParser.__init__(self, defaults=cfgDefaults)

    def Get(self, section, option, type=None, default=None):
        """
        Get an option value for given section/option or return default.
        If type is 'bool' or 'int', coerce the raw value accordingly.
        """
        if not self.has_option(section, option):
            return default
        if type == 'bool':
            return self.getboolean(section, option)
        elif type == 'int':
            return self.getint(section, option)
        return self.get(section, option)

    def GetOptionList(self, section):
        """
        Return the option names in the given section, or [] if the section
        does not exist.
        """
        if self.has_section(section):
            return self.options(section)
        return []

    def Load(self):
        """
        Load the configuration file from disk
        """
        self.read(self.file)
class IdleUserConfParser(IdleConfParser):
    """
    IdleConfigParser specialised for user configuration handling.
    """
    def AddSection(self, section):
        """
        if section doesn't exist, add it
        """
        if not self.has_section(section):
            self.add_section(section)
    def RemoveEmptySections(self):
        """
        remove any sections that have no options
        """
        for section in self.sections():
            if not self.GetOptionList(section):
                self.remove_section(section)
    def IsEmpty(self):
        """
        Remove empty sections and then return 1 if parser has no sections
        left, else return 0.
        """
        self.RemoveEmptySections()
        if self.sections():
            return 0
        else:
            return 1
    def RemoveOption(self, section, option):
        """
        If section/option exists, remove it.
        Returns 1 if option was removed, 0 otherwise.
        """
        if self.has_section(section):
            return self.remove_option(section, option)
        # Previously fell through returning None, contrary to the documented
        # 1/0 contract; return 0 explicitly.
        return 0
    def SetOption(self, section, option, value):
        """
        Sets option to value, adding section if required.
        Returns 1 if option was added or changed, otherwise 0.
        """
        if self.has_option(section, option):
            if self.get(section, option) == value:
                return 0
            else:
                self.set(section, option, value)
                return 1
        else:
            if not self.has_section(section):
                self.add_section(section)
            self.set(section, option, value)
            return 1
    def RemoveFile(self):
        """
        Removes the user config file from disk if it exists.
        """
        if os.path.exists(self.file):
            os.remove(self.file)
    def Save(self):
        """Update user configuration file.
        Remove empty sections. If resulting config isn't empty, write the file
        to disk. If config is empty, remove the file from disk if it exists.
        """
        if not self.IsEmpty():
            cfgFile = open(self.file, 'w')
            try:
                self.write(cfgFile)
            finally:
                # Previously the handle was never closed: a leaked descriptor
                # and (on some platforms) an unflushed file.
                cfgFile.close()
        else:
            self.RemoveFile()
class IdleConf:
    """
    holds config parsers for all idle config files:
    default config files
        (idle install dir)/config-main.def
        (idle install dir)/config-extensions.def
        (idle install dir)/config-highlight.def
        (idle install dir)/config-keys.def
    user config  files
        (user home dir)/.idlerc/config-main.cfg
        (user home dir)/.idlerc/config-extensions.cfg
        (user home dir)/.idlerc/config-highlight.cfg
        (user home dir)/.idlerc/config-keys.cfg
    """
    def __init__(self):
        """Create default/user parsers for every config type and load them."""
        self.defaultCfg = {}  # config type -> IdleConfParser for shipped defaults
        self.userCfg = {}     # config type -> IdleUserConfParser for user overrides
        self.cfg = {}
        self.CreateConfigHandlers()
        self.LoadCfgFiles()
    def CreateConfigHandlers(self):
        """
        set up a dictionary of config parsers for default and user
        configurations respectively
        """
        # Locate the directory holding the default .def files.
        if __name__ != '__main__':  # normal case: we were imported
            idleDir = os.path.dirname(__file__)
        else:  # we were exec'ed (for testing only)
            idleDir = os.path.abspath(sys.path[0])
        userDir = self.GetUserCfgDir()
        for cfgType in ('main', 'extensions', 'highlight', 'keys'):
            defFile = os.path.join(idleDir, 'config-' + cfgType + '.def')
            usrFile = os.path.join(userDir, 'config-' + cfgType + '.cfg')
            self.defaultCfg[cfgType] = IdleConfParser(defFile)
            self.userCfg[cfgType] = IdleUserConfParser(usrFile)
    def GetUserCfgDir(self):
        """
        Creates (if required) and returns a filesystem directory for storing
        user config files.
        """
        cfgDir = '.idlerc'
        userDir = os.path.expanduser('~')
        if userDir != '~':  # expansion worked; HOME (or equivalent) exists
            if not os.path.exists(userDir):
                warn = ('\n Warning: HOME environment variable points to\n ' +
                        userDir + '\n but the path does not exist.\n')
                sys.stderr.write(warn)
                userDir = '~'
        if userDir == '~':  # we still don't have a home directory
            # traditionally idle has defaulted to os.getcwd(), is this adeqate?
            userDir = os.getcwd()  # hack for no real homedir
        userDir = os.path.join(userDir, cfgDir)
        if not os.path.exists(userDir):
            try:  # make the config dir if it doesn't exist yet
                os.mkdir(userDir)
            except (IOError, OSError):
                # BUG FIX: os.mkdir raises OSError, not IOError, so the
                # original `except IOError` never caught the failure and the
                # warning below was unreachable; mkdir errors propagated.
                warn = ('\n Warning: unable to create user config directory\n ' +
                        userDir + '\n')
                sys.stderr.write(warn)
        return userDir
    def GetOption(self, configType, section, option, default=None, type=None):
        """
        Get an option value for given config type and given general
        configuration section/option or return a default. If type is specified,
        return as type. The user configuration is checked first, then the
        default configuration, then the passed-in default; when the passed-in
        default is used a warning is printed to stderr.
        configType must be one of ('main','extensions','highlight','keys')
        """
        if self.userCfg[configType].has_option(section, option):
            return self.userCfg[configType].Get(section, option, type=type)
        if self.defaultCfg[configType].has_option(section, option):
            return self.defaultCfg[configType].Get(section, option, type=type)
        # Option found nowhere: warn and fall back to the caller's default.
        # (%r reproduces the repr formatting of the original backtick syntax;
        # the original misspelling "configration" is kept byte-for-byte.)
        warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
                   ' problem retrieving configration option %r\n'
                   ' from section %r.\n'
                   ' returning default value: %r\n'
                   % (option, section, default))
        sys.stderr.write(warning)
        return default
    def GetSectionList(self, configSet, configType):
        """
        Get a list of sections from either the user or default config for
        the given config type.
        configSet must be either 'user' or 'default'
        configType must be one of ('main','extensions','highlight','keys')
        """
        if configType not in ('main', 'extensions', 'highlight', 'keys'):
            raise InvalidConfigType('Invalid configType specified')
        if configSet == 'user':
            cfgParser = self.userCfg[configType]
        elif configSet == 'default':
            cfgParser = self.defaultCfg[configType]
        else:
            raise InvalidConfigSet('Invalid configSet specified')
        return cfgParser.sections()
    def GetHighlight(self, theme, element, fgBg=None):
        """
        return individual highlighting theme elements.
        fgBg - string ('fg'or'bg') or None, if None return a dictionary
        containing fg and bg colours (appropriate for passing to Tkinter in,
        e.g., a tag_config call), otherwise fg or bg colour only as specified.
        """
        # Built-in themes live in the default config; anything else is a
        # user-defined theme.
        if self.defaultCfg['highlight'].has_section(theme):
            themeDict = self.GetThemeDict('default', theme)
        else:
            themeDict = self.GetThemeDict('user', theme)
        fore = themeDict[element + '-foreground']
        if element == 'cursor':  # there is no config value for cursor bg
            back = themeDict['normal-background']
        else:
            back = themeDict[element + '-background']
        highlight = {"foreground": fore, "background": back}
        if not fgBg:  # return dict of both colours
            return highlight
        if fgBg == 'fg':
            return highlight["foreground"]
        if fgBg == 'bg':
            return highlight["background"]
        raise InvalidFgBg('Invalid fgBg specified')
    def GetThemeDict(self,type,themeName):
        """
        type - string, 'default' or 'user' theme type
        themeName - string, theme name
        Returns a dictionary which holds {option:value} for each element
        in the specified theme. Values are loaded over a set of ultimate last
        fallback defaults to guarantee that all theme elements are present in
        a newly created theme.
        """
        if type == 'user':
            cfgParser=self.userCfg['highlight']
        elif type == 'default':
            cfgParser=self.defaultCfg['highlight']
        else:
            raise InvalidTheme, 'Invalid theme type specified'
        #foreground and background values are provded for each theme element
        #(apart from cursor) even though all these values are not yet used
        #by idle, to allow for their use in the future. Default values are
        #generally black and white.
        # This dict is both the list of required theme elements and their
        # last-resort fallback colours.
        theme={ 'normal-foreground':'#000000',
                'normal-background':'#ffffff',
                'keyword-foreground':'#000000',
                'keyword-background':'#ffffff',
                'comment-foreground':'#000000',
                'comment-background':'#ffffff',
                'string-foreground':'#000000',
                'string-background':'#ffffff',
                'definition-foreground':'#000000',
                'definition-background':'#ffffff',
                'hilite-foreground':'#000000',
                'hilite-background':'gray',
                'break-foreground':'#ffffff',
                'break-background':'#000000',
                'hit-foreground':'#ffffff',
                'hit-background':'#000000',
                'error-foreground':'#ffffff',
                'error-background':'#000000',
                #cursor (only foreground can be set)
                'cursor-foreground':'#000000',
                #shell window
                'stdout-foreground':'#000000',
                'stdout-background':'#ffffff',
                'stderr-foreground':'#000000',
                'stderr-background':'#ffffff',
                'console-foreground':'#000000',
                'console-background':'#ffffff' }
        # Overwrite each fallback with the configured colour where present;
        # warn when an element is missing from the theme.
        for element in theme.keys():
            if not cfgParser.has_option(themeName,element):
                #we are going to return a default, print warning
                warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict'+
                           ' -\n problem retrieving theme element '+`element`+
                           '\n from theme '+`themeName`+'.\n'+
                           ' returning default value: '+`theme[element]`+'\n')
                sys.stderr.write(warning)
            colour=cfgParser.Get(themeName,element,default=theme[element])
            theme[element]=colour
        return theme
    def CurrentTheme(self):
        """Return the name of the currently active highlight theme ('' if unset)."""
        return self.GetOption('main', 'Theme', 'name', default='')
    def CurrentKeys(self):
        """Return the name of the currently active key set ('' if unset)."""
        return self.GetOption('main', 'Keys', 'name', default='')
    def GetExtensions(self, activeOnly=1):
        """
        Gets a list of all idle extensions declared in the config files.
        activeOnly - boolean, if true only return active (enabled) extensions
        """
        # Start from the shipped defaults, then append user-only extensions.
        extns = self.RemoveKeyBindNames(
                self.GetSectionList('default', 'extensions'))
        for extn in self.RemoveKeyBindNames(
                self.GetSectionList('user', 'extensions')):
            if extn not in extns:  # user has added own extension
                extns.append(extn)
        if not activeOnly:
            return extns
        # Keep only the extensions whose 'enable' option is true.
        return [extn for extn in extns
                if self.GetOption('extensions', extn, 'enable', default=1,
                                  type='bool')]
    def RemoveKeyBindNames(self, extnNameList):
        """Return extnNameList with keybinding section names filtered out.

        Extension config files declare '<name>_bindings' and
        '<name>_cfgBindings' sections for keybindings; those are not
        extension names and must be dropped from the section list.

        The previous implementation collected indices with names.index(),
        which always returns the *first* occurrence of a name -- with
        duplicate entries it could delete the wrong element, or the same
        element twice.  Building a new filtered list is correct and O(n).
        Note the input list is no longer mutated; callers use the return
        value only.
        """
        return [name for name in extnNameList
                if not name.endswith(('_bindings', '_cfgBindings'))]
    def GetExtnNameForEvent(self, virtualEvent):
        """Return the name of the extension that virtualEvent is bound in.

        Returns None when the event is not bound in any extension.
        virtualEvent - string, name of the virtual event to test for,
                       without the enclosing '<< >>'.
        """
        target = '<<' + virtualEvent + '>>'
        found = None
        # Scan the full extension list; as in the original behaviour the
        # *last* extension binding the event wins.
        for extension in self.GetExtensions(activeOnly=0):
            if target in self.GetExtensionKeys(extension):
                found = extension
        return found
    def GetExtensionKeys(self, extensionName):
        """Return the configurable keybindings for a particular extension.

        The bindings are returned as they exist in the dictionary produced
        by GetCurrentKeySet; that is, where previously used bindings are
        disabled.
        """
        section = extensionName + '_cfgBindings'
        currentKeys = self.GetCurrentKeySet()
        bindings = {}
        extensionsCfg = self.defaultCfg['extensions']
        if extensionsCfg.has_section(section):
            for optionName in extensionsCfg.GetOptionList(section):
                event = '<<' + optionName + '>>'
                bindings[event] = currentKeys[event]
        return bindings
    def __GetRawExtensionKeys(self, extensionName):
        """Return an extension's configurable keybindings as configured.

        The bindings are read straight from the configuration files; an
        empty dictionary is returned if none are found.
        """
        section = extensionName + '_cfgBindings'
        bindings = {}
        extensionsCfg = self.defaultCfg['extensions']
        if extensionsCfg.has_section(section):
            for optionName in extensionsCfg.GetOptionList(section):
                keys = self.GetOption('extensions', section,
                                      optionName, default='').split()
                bindings['<<' + optionName + '>>'] = keys
        return bindings
    def GetExtensionBindings(self, extensionName):
        """Return every event binding for an extension as a dictionary.

        The configurable keybindings come from GetCurrentKeySet (with
        re-used keybindings disabled); the non-configurable '_bindings'
        section is then merged on top.
        """
        bindings = self.GetExtensionKeys(extensionName)
        section = extensionName + '_bindings'
        # Add the non-configurable bindings, if the section exists.
        extensionsCfg = self.defaultCfg['extensions']
        if extensionsCfg.has_section(section):
            for optionName in extensionsCfg.GetOptionList(section):
                keys = self.GetOption('extensions', section,
                                      optionName, default='').split()
                bindings['<<' + optionName + '>>'] = keys
        return bindings
    def GetKeyBinding(self, keySetName, eventStr):
        """Return the keybinding list for a specific virtual event.

        keySetName - string, name of the key binding set to look in.
        eventStr   - string, the virtual event we want the binding for,
                     represented as a string, e.g. '<<event>>'.
        """
        eventName = eventStr[2:-2]  # strip the enclosing '<<' and '>>'
        rawBinding = self.GetOption('keys', keySetName, eventName, default='')
        return rawBinding.split()
    def GetCurrentKeySet(self):
        """Return the key set dictionary for the currently active keys."""
        currentName = self.CurrentKeys()
        return self.GetKeySet(currentName)
    def GetKeySet(self, keySetName):
        """Return all requested core keybindings plus those of active extensions.

        If a binding defined in an extension is already in use, that
        extension binding is disabled (set to the empty string).
        """
        keySet = self.GetCoreKeys(keySetName)
        for extension in self.GetExtensions(activeOnly=1):
            # Merge each extension's raw bindings, disabling duplicates.
            for event, binding in self.__GetRawExtensionKeys(extension).items():
                if binding in keySet.values():
                    binding = ''  # the binding is already in use - disable it
                keySet[event] = binding
        return keySet
    def IsCoreBinding(self, virtualEvent):
        """Return true if virtualEvent is bound in the core idle keybindings.

        virtualEvent - string, name of the virtual event to test for,
                       without the enclosing '<< >>'.
        """
        event = '<<%s>>' % virtualEvent
        return event in self.GetCoreKeys()
    def GetCoreKeys(self, keySetName=None):
        """
        returns the requested set of core keybindings, with fallbacks if
        required.
        Keybindings loaded from the config file(s) are loaded _over_ these
        defaults, so if there is a problem getting any core binding there will
        be an 'ultimate last resort fallback' to the CUA-ish bindings
        defined here.
        """
        keyBindings={
            '<<copy>>': ['<Control-c>', '<Control-C>'],
            '<<cut>>': ['<Control-x>', '<Control-X>'],
            '<<paste>>': ['<Control-v>', '<Control-V>'],
            '<<beginning-of-line>>': ['<Control-a>', '<Home>'],
            '<<center-insert>>': ['<Control-l>'],
            '<<close-all-windows>>': ['<Control-q>'],
            '<<close-window>>': ['<Alt-F4>'],
            '<<do-nothing>>': ['<Control-x>'],
            '<<end-of-file>>': ['<Control-d>'],
            '<<python-docs>>': ['<F1>'],
            '<<python-context-help>>': ['<Shift-F1>'],
            '<<history-next>>': ['<Alt-n>'],
            '<<history-previous>>': ['<Alt-p>'],
            '<<interrupt-execution>>': ['<Control-c>'],
            '<<view-restart>>': ['<F6>'],
            '<<restart-shell>>': ['<Control-F6>'],
            '<<open-class-browser>>': ['<Alt-c>'],
            '<<open-module>>': ['<Alt-m>'],
            '<<open-new-window>>': ['<Control-n>'],
            '<<open-window-from-file>>': ['<Control-o>'],
            '<<plain-newline-and-indent>>': ['<Control-j>'],
            '<<print-window>>': ['<Control-p>'],
            '<<redo>>': ['<Control-y>'],
            '<<remove-selection>>': ['<Escape>'],
            '<<save-copy-of-window-as-file>>': ['<Alt-Shift-S>'],
            '<<save-window-as-file>>': ['<Alt-s>'],
            '<<save-window>>': ['<Control-s>'],
            '<<select-all>>': ['<Alt-a>'],
            '<<toggle-auto-coloring>>': ['<Control-slash>'],
            '<<undo>>': ['<Control-z>'],
            '<<find-again>>': ['<Control-g>', '<F3>'],
            '<<find-in-files>>': ['<Alt-F3>'],
            '<<find-selection>>': ['<Control-F3>'],
            '<<find>>': ['<Control-f>'],
            '<<replace>>': ['<Control-h>'],
            '<<goto-line>>': ['<Alt-g>'],
            '<<smart-backspace>>': ['<Key-BackSpace>'],
            '<<newline-and-indent>>': ['<Key-Return> <Key-KP_Enter>'],
            '<<smart-indent>>': ['<Key-Tab>'],
            '<<indent-region>>': ['<Control-Key-bracketright>'],
            '<<dedent-region>>': ['<Control-Key-bracketleft>'],
            '<<comment-region>>': ['<Alt-Key-3>'],
            '<<uncomment-region>>': ['<Alt-Key-4>'],
            '<<tabify-region>>': ['<Alt-Key-5>'],
            '<<untabify-region>>': ['<Alt-Key-6>'],
            '<<toggle-tabs>>': ['<Alt-Key-t>'],
            '<<change-indentwidth>>': ['<Alt-Key-u>']
            }
        if keySetName:
            for event in keyBindings.keys():
                binding=self.GetKeyBinding(keySetName,event)
                if binding:
                    keyBindings[event]=binding
                else: #we are going to return a default, print warning
                    warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys'+
                               ' -\n problem retrieving key binding for event '+
                               `event`+'\n from key set '+`keySetName`+'.\n'+
                               ' returning default value: '+`keyBindings[event]`+'\n')
                    sys.stderr.write(warning)
        return keyBindings
    def GetExtraHelpSourceList(self, configSet):
        """Fetch the list of extra help sources from a given configSet.

        Valid configSets are 'user' or 'default'.  Return a list of tuples
        of the form (menu_item, path_to_help_file, option), or the empty
        list.  'option' is the sequence number of the help resource;
        'option' values determine the position of the menu items on the
        Help menu, therefore the returned list is sorted by 'option'.

        Raises InvalidConfigSet for any other configSet value.

        Fixes: 'raise Exc, msg' and the cmp-style sort comparator are
        Python 2-only syntax/semantics; string.split() is the deprecated
        form of the str method.  All replaced with equivalents that behave
        identically (key-based sorts are stable, like cmp-based ones).
        """
        if configSet == 'user':
            cfgParser = self.userCfg['main']
        elif configSet == 'default':
            cfgParser = self.defaultCfg['main']
        else:
            raise InvalidConfigSet('Invalid configSet specified')
        helpSources = []
        for option in cfgParser.GetOptionList('HelpFiles'):
            value = cfgParser.Get('HelpFiles', option, default=';')
            if ';' not in value:  # malformed config entry with no ';'
                continue  # skip it: no menu item / help path can be parsed
            parts = value.split(';')
            menuItem = parts[0].strip()
            helpPath = parts[1].strip()
            if menuItem and helpPath:  # neither is an empty string
                helpSources.append((menuItem, helpPath, option))
        # Sort numerically by the 'option' sequence number.
        helpSources.sort(key=lambda source: int(source[2]))
        return helpSources
    def __helpsort(self, h1, h2):
        """cmp-style comparator ordering help-source tuples by their
        numeric sequence number (element 2)."""
        left = int(h1[2])
        right = int(h2[2])
        # Classic three-way compare: -1 if left < right, 1 if greater, 0 if equal.
        return (left > right) - (left < right)
    def GetAllExtraHelpSourcesList(self):
        """Return details of every additional help source configured.

        Returns an empty list if there are none.  Tuples are in the format
        returned by GetExtraHelpSourceList; default sources come first,
        followed by user sources.
        """
        sources = self.GetExtraHelpSourceList('default')
        sources += self.GetExtraHelpSourceList('user')
        return sources
    def LoadCfgFiles(self):
        """Load (or reload) every default and user configuration file."""
        for key, defaultParser in self.defaultCfg.items():
            defaultParser.Load()
            self.userCfg[key].Load()  # user configs share the same keys
    def SaveUserCfgFiles(self):
        """Write all loaded user configuration files back to disk."""
        for parser in self.userCfg.values():
            parser.Save()
idleConf=IdleConf()  # module-level singleton shared by all of IDLE

### module test
if __name__ == '__main__':
    def dumpCfg(cfg):
        # Dump every section and option of cfg (a {key: parser} dict) to stdout.
        print '\n',cfg,'\n'
        for key in cfg.keys():
            sections=cfg[key].sections()
            print key
            print sections
            for section in sections:
                options=cfg[key].options(section)
                print section
                print options
                for option in options:
                    print option, '=', cfg[key].Get(section,option)
    dumpCfg(idleConf.defaultCfg)
    dumpCfg(idleConf.userCfg)
    print idleConf.userCfg['main'].Get('Theme','name')
    #print idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')
 | 
	gpl-3.0 | 1,339,416,341,385,229,300 | 39.829008 | 86 | 0.563764 | false | 
| 
	0sc0d3r/enigma2 | 
	lib/python/Plugins/Extensions/DVDBurn/TitleCutter.py | 
	52 | 
	3783 | 
	from Plugins.Extensions.CutListEditor.plugin import CutListEditor
from Components.ServiceEventTracker import ServiceEventTracker
from enigma import iPlayableService, iServiceInformation
from Tools.Directories import fileExists
class TitleCutter(CutListEditor):
	"""CutListEditor variant used by DVDBurn: edits a title's cut list and
	collects stream information (audio tracks, aspect, resolution) from the
	currently playing service."""

	def __init__(self, session, t):
		# t is the DVDBurn title object; its source service is loaded for editing
		CutListEditor.__init__(self, session, t.source)
		self.skin = CutListEditor.skin
		self.session = session
		self.t = t
		# re-read PMT info / cut list whenever the running service reports changes
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evUpdatedInfo: self.getPMTInfo,
				iPlayableService.evCuesheetChanged: self.refillList
			})
		self.onExecBegin.remove(self.showTutorial)

	def getPMTInfo(self):
		"""Read audio/video stream properties from the current service and
		store them on the title object self.t."""
		service = self.session.nav.getCurrentService()
		audio = service and service.audioTracks()
		n = audio and audio.getNumberOfTracks() or 0
		if n > 0:
			from Title import ConfigFixedText
			from Project import iso639language
			from Components.config import config, ConfigSubsection, ConfigSubList, ConfigSelection, ConfigYesNo
			self.t.properties.audiotracks = ConfigSubList()
			for x in range(n):
				i = audio.getTrackInfo(x)
				DVB_lang = i.getLanguage()
				description = i.getDescription()
				pid = str(i.getPID())
				if description == "MPEG":
					description = "MP2"
				print "[audiotrack] pid:", pid, "description:", description, "language:", DVB_lang, "count:", x, "active:", (x < 8)
				self.t.properties.audiotracks.append(ConfigSubsection())
				# only the first 8 tracks default to active -- presumably a
				# DVD track-count limit; TODO confirm against the burn code
				self.t.properties.audiotracks[-1].active = ConfigYesNo(default = (x < 8))
				self.t.properties.audiotracks[-1].format = ConfigFixedText(description)
				choicelist = iso639language.getChoices()
				determined_language = iso639language.determineLanguage(DVB_lang)
				self.t.properties.audiotracks[-1].language = ConfigSelection(choices = choicelist, default=determined_language)
				self.t.properties.audiotracks[-1].pid = ConfigFixedText(pid)
				self.t.properties.audiotracks[-1].DVB_lang = ConfigFixedText(DVB_lang)
		# NOTE(review): these sAspect codes are treated as 4:3, everything
		# else as 16:9 -- verify the code mapping against the DVB spec
		sAspect = service.info().getInfo(iServiceInformation.sAspect)
		if sAspect in ( 1, 2, 5, 6, 9, 0xA, 0xD, 0xE ):
			aspect = "4:3"
		else:
			aspect = "16:9"
		self.t.properties.aspect.setValue(aspect)
		self.t.VideoType = service.info().getInfo(iServiceInformation.sVideoType)
		self.t.VideoPID = service.info().getInfo(iServiceInformation.sVideoPID)
		xres = service.info().getInfo(iServiceInformation.sVideoWidth)
		yres = service.info().getInfo(iServiceInformation.sVideoHeight)
		self.t.resolution = (xres, yres)
		self.t.framerate = service.info().getInfo(iServiceInformation.sFrameRate)
		self.t.progressive = service.info().getInfo(iServiceInformation.sProgressive)

	def checkAndGrabThumb(self):
		# grab a thumbnail frame unless a .png for this input file already exists
		if not fileExists(self.t.inputfile.rsplit('.',1)[0] + ".png"):
			CutListEditor.grabFrame(self)

	def exit(self):
		# -1 means the PMT info was never read (see Title); fetch it now
		if self.t.VideoType == -1:
			self.getPMTInfo()
		self.checkAndGrabThumb()
		self.session.nav.stopService()
		self.close(self.cut_list[:])
class CutlistReader(TitleCutter):
	"""Non-interactive variant of TitleCutter: loads the service just long
	enough to read its PMT info and grab a thumbnail, then closes and
	returns the cut list."""
	# minimal skin -- widgets are required by the base class but kept tiny
	skin = """
		<screen position="0,0" size="720,576">
		<eLabel position="0,0" size="720,576" zPosition="1" backgroundColor="#000000" />
		<widget name="Video" position="0,0" size="100,75" />
		<widget name="SeekState" position="0,0" />
		<widget source="cutlist" position="0,0" render="Listbox" >
			<convert type="TemplatedMultiContent">
				{"template": [
						MultiContentEntryText(text = 1),
						MultiContentEntryText(text = 2)
					],
				 "fonts": [gFont("Regular", 18)],
				 "itemHeight": 20
				}
			</convert>
		</widget>
		<widget name="Timeline" position="0,0" />
	</screen>"""
	def __init__(self, session, t):
		TitleCutter.__init__(self, session, t)
		self.skin = CutlistReader.skin
	def getPMTInfo(self):
		# after reading the stream info, grab a thumbnail and close at once
		TitleCutter.getPMTInfo(self)
		TitleCutter.checkAndGrabThumb(self)
		self.close(self.cut_list[:])
 | 
	gpl-2.0 | 8,703,329,266,913,214,000 | 38.40625 | 119 | 0.720856 | false | 
| 
	nddsg/TreeDecomps | 
	xplodnTree/tdec/b2CliqueTreeRules.py | 
	1 | 
	3569 | 
	#!/usr/bin/env python
__author__ = 'saguinag' + '@' + 'nd.edu'
__version__ = "0.1.0"
##
## fname "b2CliqueTreeRules.py"
##
## TODO: some todo list
## VersionLog:
import net_metrics as metrics
import pandas as pd
import argparse, traceback
import os, sys
import networkx as nx
import re
from collections import deque, defaultdict, Counter
import tree_decomposition as td
import PHRG as phrg
import probabilistic_cfg as pcfg
import exact_phrg as xphrg
import a1_hrg_cliq_tree as nfld
from a1_hrg_cliq_tree import load_edgelist
DEBUG = False
def get_parser ():
  """Build the argparse parser for this command-line tool."""
  parser = argparse.ArgumentParser(
      description='b2CliqueTreeRules.py: given a tree derive grammar rules')
  parser.add_argument('-t', '--treedecomp', required=True,
                      help='input tree decomposition (dimacs file format)')
  parser.add_argument('--version', action='version', version=__version__)
  return parser
def dimacs_td_ct (tdfname):
  """ tree decomp to clique-tree

  Reads an INDDGO tree decomposition (dimacs format), roots and binarizes
  it, derives HRG production rules over the original graph, normalizes the
  rule counts into probabilities and writes them to a bz2-compressed CSV
  under ./ProdRules/.
  """
  print '... input file:', tdfname
  fname = tdfname
  graph_name = os.path.basename(fname)
  gname = graph_name.split('.')[0]
  gfname = "datasets/out." + gname
  tdh = os.path.basename(fname).split('.')[1] # tree decomp heuristic
  tfname = gname+"."+tdh
  G = load_edgelist(gfname)
  if DEBUG: print nx.info(G)
  print
  with open(fname, 'r') as f:  # read tree decomp from inddgo
    lines = f.readlines()
    lines = [x.rstrip('\r\n') for x in lines]
  cbags = {}
  # bag lines look like 'B <id> <size> v1 v2 ...' -- b[2] (the size) is
  # skipped; presumably redundant with len(b[3:]) -- TODO confirm
  bags = [x.split() for x in lines if x.startswith('B')]
  for b in bags:
    cbags[int(b[1])] = [int(x) for x in b[3:]]  # what to do with bag size?
  # edge lines 'e <s> <t>' connect bag ids in the decomposition tree
  edges = [x.split()[1:] for x in lines if x.startswith('e')]
  edges = [[int(k) for k in x] for x in edges]
  tree = defaultdict(set)
  for s, t in edges:
    tree[frozenset(cbags[s])].add(frozenset(cbags[t]))
    if DEBUG: print '.. # of keys in `tree`:', len(tree.keys())
  if DEBUG: print tree.keys()
  root = list(tree)[0]
  if DEBUG: print '.. Root:', root
  # re-root at bag 1 (the first line's arbitrary root is discarded)
  root = frozenset(cbags[1])
  if DEBUG: print '.. Root:', root
  T = td.make_rooted(tree, root)
  if DEBUG: print '.. T rooted:', len(T)
  # nfld.unfold_2wide_tuple(T) # lets me display the tree's frozen sets
  T = phrg.binarize(T)
  prod_rules = {}
  td.new_visit(T, G, prod_rules)
  if DEBUG: print "--------------------"
  if DEBUG: print "- Production Rules -"
  if DEBUG: print "--------------------"
  for k in prod_rules.iterkeys():
    if DEBUG: print k
    s = 0
    for d in prod_rules[k]:
      s += prod_rules[k][d]
    for d in prod_rules[k]:
      prod_rules[k][d] = float(prod_rules[k][d]) / float(s)  # normalization step to create probs not counts.
      if DEBUG: print '\t -> ', d, prod_rules[k][d]
  # flatten into (rule id, lhs, rhs, probability) rows
  rules = []
  id = 0
  for k, v in prod_rules.iteritems():
    sid = 0
    for x in prod_rules[k]:
      rhs = re.findall("[^()]+", x)
      rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]))
      if DEBUG: print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])
      sid += 1
    id += 1
  df = pd.DataFrame(rules)
  outdf_fname = "./ProdRules/"+tfname+".prules"
  if not os.path.isfile(outdf_fname+".bz2"):
    print '...',outdf_fname, "written"
    df.to_csv(outdf_fname+".bz2", compression="bz2")
  else:
    print '...', outdf_fname, "file exists"
  return
def main ():
  """Parse command-line arguments and run the clique-tree conversion."""
  args = vars(get_parser().parse_args())
  dimacs_td_ct(args['treedecomp'])  # gen synth graph
if __name__ == '__main__':
  try:
    main()
  except Exception, e:
    # report the error with a full traceback and exit non-zero
    print str(e)
    traceback.print_exc()
    sys.exit(1)
  # normal termination
  sys.exit(0)
 | 
	mit | 1,961,374,472,834,969,000 | 26.037879 | 112 | 0.612496 | false | 
| 
	ar4s/django | 
	django/db/models/sql/expressions.py | 
	3 | 
	4373 | 
	import copy
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
class SQLEvaluator(object):
    """Compile an expression tree (F() nodes and combinations of them)
    into SQL for a given query, resolving field references into columns.

    Bug fix: prepare_leaf's FieldDoesNotExist handler referenced
    self.name, but SQLEvaluator has no 'name' attribute -- it would have
    raised AttributeError instead of the intended FieldError.  The field
    name lives on the leaf node (node.name).
    """

    def __init__(self, expression, query, allow_joins=True, reuse=None):
        self.expression = expression
        self.opts = query.get_meta()
        self.reuse = reuse
        # (node, col) pairs resolved during prepare(); col is either an
        # aggregate object or an (alias, column) tuple.
        self.cols = []
        self.expression.prepare(self, query, allow_joins)

    def relabeled_clone(self, change_map):
        """Return a copy of self with table aliases renamed per change_map."""
        clone = copy.copy(self)
        clone.cols = []
        for node, col in self.cols:
            if hasattr(col, 'relabeled_clone'):
                clone.cols.append((node, col.relabeled_clone(change_map)))
            else:
                clone.cols.append((node,
                                   (change_map.get(col[0], col[0]), col[1])))
        return clone

    def get_cols(self):
        """Return all (alias, column) tuples used by this expression."""
        cols = []
        for node, col in self.cols:
            if hasattr(node, 'get_cols'):
                cols.extend(node.get_cols())
            elif isinstance(col, tuple):
                cols.append(col)
        return cols

    def prepare(self):
        return self

    def as_sql(self, qn, connection):
        """Render the expression to (sql, params) via the visitor methods."""
        return self.expression.evaluate(self, qn, connection)

    #####################################################
    # Visitor methods for initial expression preparation #
    #####################################################

    def prepare_node(self, node, query, allow_joins):
        for child in node.children:
            if hasattr(child, 'prepare'):
                child.prepare(self, query, allow_joins)

    def prepare_leaf(self, node, query, allow_joins):
        """Resolve a leaf (an F() field reference) into a column or aggregate."""
        if not allow_joins and LOOKUP_SEP in node.name:
            raise FieldError("Joined field references are not permitted in this query")

        field_list = node.name.split(LOOKUP_SEP)
        if node.name in query.aggregates:
            self.cols.append((node, query.aggregate_select[node.name]))
        else:
            try:
                field, sources, opts, join_list, path = query.setup_joins(
                    field_list, query.get_meta(),
                    query.get_initial_alias(), self.reuse)
                targets, _, join_list = query.trim_joins(sources, join_list, path)
                if self.reuse is not None:
                    self.reuse.update(join_list)
                for t in targets:
                    self.cols.append((node, (join_list[-1], t.column)))
            except FieldDoesNotExist:
                # was self.name, which SQLEvaluator does not define
                raise FieldError("Cannot resolve keyword %r into field. "
                                 "Choices are: %s" % (node.name,
                                                      [f.name for f in self.opts.fields]))

    ##################################################
    # Visitor methods for final expression evaluation #
    ##################################################

    def evaluate_node(self, node, qn, connection):
        """Combine the SQL of node's children with node's connector."""
        expressions = []
        expression_params = []
        for child in node.children:
            if hasattr(child, 'evaluate'):
                sql, params = child.evaluate(self, qn, connection)
            else:
                sql, params = '%s', (child,)

            # parenthesize compound children to preserve precedence
            if len(getattr(child, 'children', [])) > 1:
                format = '(%s)'
            else:
                format = '%s'

            if sql:
                expressions.append(format % sql)
                expression_params.extend(params)

        return connection.ops.combine_expression(node.connector, expressions), expression_params

    def evaluate_leaf(self, node, qn, connection):
        """Render a resolved leaf as quoted SQL (aggregate or alias.column)."""
        col = None
        for n, c in self.cols:
            if n is node:
                col = c
                break
        if col is None:
            raise ValueError("Given node not found")
        if hasattr(col, 'as_sql'):
            return col.as_sql(qn, connection)
        else:
            return '%s.%s' % (qn(col[0]), qn(col[1])), []

    def evaluate_date_modifier_node(self, node, qn, connection):
        """Render date +/- timedelta arithmetic via the backend's interval SQL."""
        timedelta = node.children.pop()
        sql, params = self.evaluate_node(node, qn, connection)

        if (timedelta.days == timedelta.seconds == timedelta.microseconds == 0):
            return sql, params

        return connection.ops.date_interval_sql(sql, node.connector, timedelta), params
 | 
	bsd-3-clause | -2,947,435,316,157,446,000 | 36.376068 | 96 | 0.52504 | false | 
| 
	hynnet/openwrt-mt7620 | 
	staging_dir/host/lib/python2.7/ctypes/test/test_struct_fields.py | 
	264 | 
	1503 | 
	import unittest
from ctypes import *
class StructFieldsTestCase(unittest.TestCase):
    """Check when Structure/Union classes become 'finalized'.

    A Structure/Union class is finalized sooner or later, when one of
    these things happen:

    1. _fields_ is set.
    2. An instance is created.
    3. The type is used as field of another Structure/Union.
    4. The type is subclassed.

    Once finalized, assigning _fields_ is no longer allowed.
    """

    def _assert_fields_locked(self, cls):
        # helper: cls must reject any further _fields_ assignment
        self.assertRaises(AttributeError, setattr, cls, "_fields_", [])

    def test_1_A(self):
        class X(Structure):
            pass
        self.assertEqual(sizeof(X), 0)  # not finalized yet
        X._fields_ = []  # finalized
        self._assert_fields_locked(X)

    def test_1_B(self):
        class X(Structure):
            _fields_ = []  # finalized at class-creation time
        self._assert_fields_locked(X)

    def test_2(self):
        class X(Structure):
            pass
        X()  # instantiation finalizes the type
        self._assert_fields_locked(X)

    def test_3(self):
        class X(Structure):
            pass
        class Y(Structure):
            _fields_ = [("x", X)]  # finalizes X
        self._assert_fields_locked(X)

    def test_4(self):
        class X(Structure):
            pass
        class Y(X):
            pass
        self._assert_fields_locked(X)
        Y._fields_ = []
        self._assert_fields_locked(X)
if __name__ == "__main__":
    unittest.main()
 | 
	gpl-2.0 | -1,112,820,804,364,292,400 | 29.06 | 71 | 0.566866 | false | 
| 
	iodoom-gitorious/enhanced-iodoom3 | 
	neo/sys/linux/runner/runner_lib.py | 
	61 | 
	6759 | 
	# run doom process on a series of maps
# can be used for regression testing, or to fetch media
# keeps a log of each run ( see getLogfile )
# currently uses a basic stdout activity timeout to decide when to move on
# using a periodic check of /proc/<pid>/status SleepAVG
# when the sleep average is reaching 0, issue a 'quit' to stdout
# keeps serialized run status in runner.pickle
# NOTE: can be used to initiate runs on failed maps only for instance etc.
# TODO: use the serialized and not the logs to sort the run order
# TODO: better logging. Use idLogger?
# TODO: configurable event when the process is found interactive
# instead of emitting a quit, perform some warning action?
import sys, os, commands, string, time, traceback, pickle
from twisted.application import internet, service
from twisted.internet import protocol, reactor, utils, defer
from twisted.internet.task import LoopingCall
class doomClientProtocol( protocol.ProcessProtocol ):
	"""Twisted process protocol for one doom run: mirrors the child's
	output to a per-map logfile and fires a deferred when it exits."""

	# ProcessProtocol API
	def connectionMade( self ):
		self.logfile.write( 'connectionMade\n' )
		
	def outReceived( self, data ):
		# mirror the child's stdout to our stdout and the logfile
		print data
		self.logfile.write( data )

	def errReceived( self, data ):
		print 'stderr: ' + data
		self.logfile.write( 'stderr: ' + data )
		
	def inConnectionLost( self ):
		self.logfile.write( 'inConnectionLost\n' )
		
	def outConnectionLost( self ):
		self.logfile.write( 'outConnectionLost\n' )
		
	def errConnectionLost( self ):
		self.logfile.write( 'errConnectionLost\n' )
		
	def processEnded( self, status_object ):
		# stamp the end time, close the log and notify the owning service
		self.logfile.write( 'processEnded %s\n' % repr( status_object ) )
		self.logfile.write( time.strftime( '%H:%M:%S', time.localtime( time.time() ) ) + '\n' )
		self.logfile.close()
		self.deferred.callback( None )
		
	# mac management
	def __init__( self, logfilename, deferred ):
		# open the logfile in append mode and stamp the start time
		self.logfilename = logfilename
		self.logfile = open( logfilename, 'a' )
		self.logfile.write( time.strftime( '%H:%M:%S', time.localtime( time.time() ) ) + '\n' )
		self.deferred = deferred
class doomService( service.Service ):
	"""Twisted service that runs doom over a series of maps, one at a
	time, logging each run and polling /proc/<pid>/status SleepAVG to
	decide when the map is loaded and interactive."""
	# current monitoring state
	# 0: nothing running
	# 1: we have a process running, we're monitoring it's CPU usage
	# 2: we issued a 'quit' to the process's stdin
	#   either going to get a processEnded, or a timeout
	# 3: we forced a kill because of error, timeout etc.
	state = 0
	# load check period
	check_period = 10
	# pickled status file
	pickle_file = 'runner.pickle'
	# stores status indexed by filename
	# { 'mapname' : ( state, last_update ), .. }
	status = {}
	# start the maps as multiplayer server
	multiplayer = 0

	def __init__( self, bin, cmdline, maps, sort = 0, multiplayer = 0, blank_run = 0 ):
		self.p_transport = None
		self.multiplayer = multiplayer
		self.blank_run = blank_run
		if ( self.multiplayer ):
			print 'Operate in multiplayer mode'
		self.bin = os.path.abspath( bin )
		# accept the command line either as a string or an argument list
		if ( type( cmdline ) is type( '' ) ):
			self.cmdline = string.split( cmdline, ' ' )
		else:
			self.cmdline = cmdline
		self.maps = maps
		# resume serialized run status from a previous session, if any
		if ( os.path.exists( self.pickle_file ) ):
			print 'Loading pickled status %s' % self.pickle_file
			handle = open( self.pickle_file, 'r' )
			self.status = pickle.load( handle )
			handle.close()
		if ( sort ):
			# run maps with the oldest (or missing) logfiles first
			print 'Sorting maps oldest runs first'
			maps_sorted = [ ]
			for i in self.maps:
				i_log = self.getLogfile( i )
				if ( os.path.exists( i_log ) ):
					maps_sorted.append( ( i, os.path.getmtime( i_log ) ) )
				else:
					maps_sorted.append( ( i, 0 ) )
			maps_sorted.sort( lambda x,y : cmp( x[1], y[1] ) )
			self.maps = [ ]
			if ( blank_run ):
				self.maps.append( 'blankrun' )
			for i in maps_sorted:
				self.maps.append( i[ 0 ] )
			print 'Sorted as: %s\n' % repr( self.maps )

	def getLogfile( self, name ):
		# map names may contain '/', which is not valid in a log filename
		return 'logs/' + string.translate( name, string.maketrans( '/', '-' ) ) + '.log'

	# deferred call when child process dies
	def processEnded( self, val ):
		print 'child has died - state %d' % self.state
		# record the final state for this map, then advance or stop
		self.status[ self.maps[ self.i_map ] ] = ( self.state, time.time() )
		self.i_map += 1
		if ( self.i_map >= len( self.maps ) ):
			reactor.stop()
		else:
			self.nextMap()

	def processTimeout( self ):
		self.p_transport.signalProcess( "KILL" )

	def sleepAVGReply( self, val ):
		# val is the 'SleepAVG: <n>%' line read from /proc/<pid>/status
		try:
			s = val[10:][:-2]
			print 'sleepAVGReply %s%%' % s
			if ( s == '0' ):
				# need twice in a row
				if ( self.state == 2 ):
					print 'child process is interactive'
					self.p_transport.write( 'quit\n' )
				else:
					self.state = 2
			else:
				self.state = 1
#			else:
#				reactor.callLater( self.check_period, self.checkCPU )
		except:
			# anything unexpected (malformed val, dead transport): kill the child
			print traceback.format_tb( sys.exc_info()[2] )
			print sys.exc_info()[0]
			print 'exception raised in sleepAVGReply - killing process'
			self.state = 3
			self.p_transport.signalProcess( 'KILL' )

	def sleepAVGTimeout( self ):
		print 'sleepAVGTimeout - killing process'
		self.state = 3
		self.p_transport.signalProcess( 'KILL' )

	# called at regular intervals to monitor the sleep average of the child process
	# when sleep reaches 0, it means the map is loaded and interactive
	def checkCPU( self ):
		if ( self.state == 0 or self.p_transport is None or self.p_transport.pid is None ):
			print 'checkCPU: no child process atm'
			return
		defer = utils.getProcessOutput( '/bin/bash', [ '-c', 'cat /proc/%d/status | grep SleepAVG' % self.p_transport.pid ] )
		defer.addCallback( self.sleepAVGReply )
		defer.setTimeout( 2, self.sleepAVGTimeout )

	def nextMap( self ):
		"""Spawn the doom process for the next map in the list."""
		self.state = 0
		name = self.maps[ self.i_map ]
		print 'Starting map: ' + name
		logfile = self.getLogfile( name )
		print 'Logging to: ' + logfile
		# 'blankrun' starts the engine without loading any map
		if ( self.multiplayer ):
			cmdline = [ self.bin ] + self.cmdline + [ '+set', 'si_map', name ]
			if ( name != 'blankrun' ):
				cmdline.append( '+spawnServer' )
		else:
			cmdline = [ self.bin ] + self.cmdline
			if ( name != 'blankrun' ):
				cmdline += [ '+devmap', name ]
		print 'Command line: ' + repr( cmdline )
		self.deferred = defer.Deferred()
		self.deferred.addCallback( self.processEnded )
		self.p_transport = reactor.spawnProcess( doomClientProtocol( logfile, self.deferred ), self.bin, cmdline , path = os.path.dirname( self.bin ), env = os.environ )
		self.state = 1
#		# setup the CPU usage loop
#		reactor.callLater( self.check_period, self.checkCPU )

	def startService( self ):
		print 'doomService startService'
		# periodic CPU/sleep-average polling loop
		loop = LoopingCall( self.checkCPU )
		loop.start( self.check_period )
		self.i_map = 0
		self.nextMap()

	def stopService( self ):
		print 'doomService stopService'
		if ( not self.p_transport.pid is None ):
			self.p_transport.signalProcess( 'KILL' )
		# serialize
		print 'saving status to %s' % self.pickle_file
		handle = open( self.pickle_file, 'w+' )
		pickle.dump( self.status, handle )
		handle.close()
 | 
	gpl-3.0 | 291,736,784,438,179,700 | 31.339713 | 163 | 0.669182 | false | 
| 
	igel-kun/pyload | 
	module/plugins/hooks/CloudFlareDdos.py | 
	1 | 
	11909 | 
	# -*- coding: utf-8 -*-
import inspect
import re
import urlparse
from module.network.HTTPRequest import BadHeader
from ..captcha.ReCaptcha import ReCaptcha
from ..internal.Addon import Addon
from ..internal.misc import parse_html_header
def plugin_id(plugin):
    return ("<%(plugintype)s %(pluginname)s%(id)s>" %
            {'plugintype': plugin.__type__.upper(),
             'pluginname': plugin.__name__,
             'id': "[%s]" % plugin.pyfile.id if plugin.pyfile else ""})
def is_simple_plugin(obj):
    return any(k.__name__ in ("SimpleHoster", "SimpleCrypter")
               for k in inspect.getmro(type(obj)))
def get_plugin_last_header(plugin):
    # @NOTE: req can be a HTTPRequest or a Browser object
    return plugin.req.http.header if hasattr(plugin.req, "http") else plugin.req.header
class CloudFlare(object):
    @staticmethod
    def handle_function(addon_plugin, owner_plugin, func_name, orig_func, args):
        addon_plugin.log_debug("Calling %s() of %s" % (func_name, plugin_id(owner_plugin)))
        try:
            data = orig_func(*args[0], **args[1])
            addon_plugin.log_debug("%s() returned successfully" % func_name)
            return data
        except BadHeader, e:
            addon_plugin.log_debug("%s(): got BadHeader exception %s" % (func_name, e.code))
            header = parse_html_header(e.header)
            if "cloudflare" in header.get('server', ""):
                if e.code == 403:
                    data = CloudFlare._solve_cf_security_check(addon_plugin, owner_plugin, e.content)
                elif e.code == 503:
                    for _i in range(3):
                        try:
                            data = CloudFlare._solve_cf_ddos_challenge(addon_plugin, owner_plugin, e.content)
                            break
                        except BadHeader, e:  #: Possibly we got another ddos challenge
                            addon_plugin.log_debug("%s(): got BadHeader exception %s" % (func_name, e.code))
                            header = parse_html_header(e.header)
                            if e.code == 503 and "cloudflare" in header.get('server', ""):
                                continue  #: Yes, it's a ddos challenge again..
                            else:
                                data = None  # Tell the exception handler to re-throw the exception
                                break
                    else:
                        addon_plugin.log_error("%s(): Max solve retries reached" % func_name)
                        data = None  # Tell the exception handler to re-throw the exception
                else:
                    addon_plugin.log_warning(_("Unknown CloudFlare response code %s") % e.code)
                    raise
                if data is None:
                    raise e
                else:
                    return data
            else:
                raise
    @staticmethod
    def _solve_cf_ddos_challenge(addon_plugin, owner_plugin, data):
        try:
            addon_plugin.log_info(_("Detected CloudFlare's DDoS protection page"))
            # Cloudflare requires a delay before solving the challenge
            wait_time = (int(re.search('submit\(\);\r?\n\s*},\s*([0-9]+)', data).group(1)) + 999) / 1000
            owner_plugin.set_wait(wait_time)
            last_url = owner_plugin.req.lastEffectiveURL
            urlp = urlparse.urlparse(last_url)
            domain = urlp.netloc
            submit_url = "%s://%s/cdn-cgi/l/chk_jschl" % (urlp.scheme, domain)
            get_params = {}
            try:
                get_params['jschl_vc'] = re.search(r'name="jschl_vc" value="(\w+)"', data).group(1)
                get_params['pass'] = re.search(r'name="pass" value="(.+?)"', data).group(1)
                get_params['s'] = re.search(r'name="s" value="(.+?)"', data).group(1)
                # Extract the arithmetic operation
                js = re.search(r'setTimeout\(function\(\){\s+(var s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n',
                               data).group(1)
                js = re.sub(r'a\.value = (.+\.toFixed\(10\);).+', r'\1', js)
                solution_name = re.search(r's,t,o,p,b,r,e,a,k,i,n,g,f,\s*(.+)\s*=', js).group(1)
                g = re.search(r'(.*};)\n\s*(t\s*=(.+))\n\s*(;%s.*)' % (solution_name), js, re.M | re.I | re.S).groups()
                js = g[0] + g[-1]
                js = re.sub(r"[\n\\']", "", js)
            except Exception:
                # Something is wrong with the page.
                # This may indicate CloudFlare has changed their anti-bot
                # technique.
                owner_plugin.log_error(_("Unable to parse CloudFlare's DDoS protection page"))
                return None  # Tell the exception handler to re-throw the exception
            if "toFixed" not in js:
                owner_plugin.log_error(_("Unable to parse CloudFlare's DDoS protection page"))
                return None  # Tell the exception handler to re-throw the exception
            atob = 'var atob = function(str) {return Buffer.from(str, "base64").toString("binary");}'
            try:
                k = re.search(r'k\s*=\s*\'(.+?)\';', data).group(1)
                v = re.search(r'<div(?:.*)id="%s"(?:.*)>(.*)</div>' % k, data).group(1)
                doc = 'var document= {getElementById: function(x) { return {innerHTML:"%s"};}}' % v
            except (AttributeError, IndexError):
                doc = ''
            js = '%s;%s;var t="%s";%s' % (doc, atob, domain, js)
            # Safely evaluate the Javascript expression
            res = owner_plugin.js.eval(js)
            try:
                get_params['jschl_answer'] = str(float(res))
            except ValueError:
                owner_plugin.log_error(_("Unable to parse CloudFlare's DDoS protection page"))
                return None  # Tell the exception handler to re-throw the exception
            owner_plugin.wait()  # Do the actual wait
            return owner_plugin.load(submit_url,
                                     get=get_params,
                                     ref=last_url)
        except BadHeader, e:
            raise e  #: Huston, we have a BadHeader!
        except Exception, e:
            addon_plugin.log_error(e)
            return None  # Tell the exception handler to re-throw the exception
    @staticmethod
    def _solve_cf_security_check(addon_plugin, owner_plugin, data):
        try:
            last_url = owner_plugin.req.lastEffectiveURL
            captcha = ReCaptcha(owner_plugin.pyfile)
            captcha_key = captcha.detect_key(data)
            if captcha_key:
                addon_plugin.log_info(_("Detected CloudFlare's security check page"))
                response, challenge = captcha.challenge(captcha_key, data)
                return owner_plugin.load(owner_plugin.fixurl("/cdn-cgi/l/chk_captcha"),
                                         get={'g-recaptcha-response': response},
                                         ref=last_url)
            else:
                addon_plugin.log_warning(_("Got unexpected CloudFlare html page"))
                return None  # Tell the exception handler to re-throw the exception
        except Exception, e:
            addon_plugin.log_error(e)
            return None  # Tell the exception handler to re-throw the exception
class PreloadStub(object):
    def __init__(self, addon_plugin, owner_plugin):
        self.addon_plugin = addon_plugin
        self.owner_plugin = owner_plugin
        self.old_preload = owner_plugin._preload
    def my_preload(self, *args, **kwargs):
        data = CloudFlare.handle_function(self.addon_plugin, self.owner_plugin, "_preload", self.old_preload, (args, kwargs))
        if data is not None:
            self.owner_plugin.data = data
    def __repr__(self):
        return "<PreloadStub object at %s>" % hex(id(self))
class CloudFlareDdos(Addon):
    __name__ = "CloudFlareDdos"
    __type__ = "hook"
    __version__ = "0.16"
    __status__ = "testing"
    __config__ = [("activated", "bool", "Activated", False)]
    __description__ = """CloudFlare DDoS protection support"""
    __license__ = "GPLv3"
    __authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
    def activate(self):
        self.stubs = {}
        self._override_get_url()
    def deactivate(self):
        while len(self.stubs):
            stub = next(self.stubs.itervalues())
            self._unoverride_preload(stub.owner_plugin)
        self._unoverride_get_url()
    def _unoverride_preload(self, plugin):
        if id(plugin) in self.stubs:
            self.log_debug("Unoverriding _preload() for %s" % plugin_id(plugin))
            stub = self.stubs.pop(id(plugin))
            stub.owner_plugin._preload = stub.old_preload
        else:
            self.log_warning(_("No _preload() override found for %s, cannot un-override>") %
                plugin_id(plugin))
    def _override_preload(self, plugin):
        if id(plugin) not in self.stubs:
            stub = PreloadStub(self, plugin)
            self.stubs[id(plugin)] = stub
            self.log_debug("Overriding _preload() for %s" % plugin_id(plugin))
            plugin._preload = stub.my_preload
        else:
            self.log_warning(_("Already overrided _preload() for %s") % plugin_id(plugin))
    def _override_get_url(self):
        self.log_debug("Overriding get_url()")
        self.old_get_url = self.pyload.requestFactory.getURL
        self.pyload.requestFactory.getURL = self.my_get_url
    def _unoverride_get_url(self):
        self.log_debug("Unoverriding get_url()")
        self.pyload.requestFactory.getURL = self.old_get_url
    def _find_owner_plugin(self):
        """
        Walk the callstack until we find SimpleHoster or SimpleCrypter class
        Dirty but works.
        """
        f = frame = inspect.currentframe()
        try:
            while True:
                if f is None:
                    return None
                elif 'self' in f.f_locals and is_simple_plugin(f.f_locals['self']):
                    return f.f_locals['self']
                else:
                    f = f.f_back
        finally:
            del frame
    def download_preparing(self, pyfile):
        #: Only SimpleHoster and SimpleCrypter based plugins are supported
        if not is_simple_plugin(pyfile.plugin):
            self.log_debug("Skipping plugin %s" % plugin_id(pyfile.plugin))
            return
        attr = getattr(pyfile.plugin, "_preload", None)
        if not attr and not callable(attr):
            self.log_error(_("%s is missing _preload() function, cannot override!") % plugin_id(pyfile.plugin))
            return
        self._override_preload(pyfile.plugin)
    def download_processed(self, pyfile):
        if id(pyfile.plugin) in self.stubs:
            self._unoverride_preload(pyfile.plugin)
    def my_get_url(self, *args, **kwargs):
        owner_plugin = self._find_owner_plugin()
        if owner_plugin is None:
            self.log_warning(_("Owner plugin not found, cannot process"))
            return self.old_get_url(*args, **kwargs)
        else:
            #@NOTE: Better use owner_plugin.load() instead of get_url() so cookies are saved and so captcha credits
            #@NOTE: Also that way we can use 'owner_plugin.req.header' to get the headers, otherwise we cannot get them
            res = CloudFlare.handle_function(self, owner_plugin, "get_url", owner_plugin.load, (args, kwargs))
            if kwargs.get('just_header', False):
                # @NOTE: SimpleHoster/SimpleCrypter returns a dict while get_url() returns raw headers string,
                # make sure we return a string for get_url('just_header'=True)
                res = get_plugin_last_header(owner_plugin)
            return res
 | 
	gpl-3.0 | -8,680,382,063,904,714,000 | 37.665584 | 128 | 0.546393 | false | 
| 
	chrisndodge/edx-platform | 
	lms/djangoapps/student_profile/test/test_views.py | 
	113 | 
	3370 | 
	# -*- coding: utf-8 -*-
""" Tests for student profile views. """
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from util.testing import UrlResetMixin
from student.tests.factories import UserFactory
from student_profile.views import learner_profile_context
class LearnerProfileViewTest(UrlResetMixin, TestCase):
    """ Tests for the student profile view. """
    USERNAME = "username"
    PASSWORD = "password"
    CONTEXT_DATA = [
        'default_public_account_fields',
        'accounts_api_url',
        'preferences_api_url',
        'account_settings_page_url',
        'has_preferences_access',
        'own_profile',
        'country_options',
        'language_options',
        'account_settings_data',
        'preferences_data',
    ]
    def setUp(self):
        super(LearnerProfileViewTest, self).setUp()
        self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
        self.client.login(username=self.USERNAME, password=self.PASSWORD)
    def test_context(self):
        """
        Verify learner profile page context data.
        """
        request = RequestFactory().get('/url')
        request.user = self.user
        context = learner_profile_context(request, self.USERNAME, self.user.is_staff)
        self.assertEqual(
            context['data']['default_public_account_fields'],
            settings.ACCOUNT_VISIBILITY_CONFIGURATION['public_fields']
        )
        self.assertEqual(
            context['data']['accounts_api_url'],
            reverse("accounts_api", kwargs={'username': self.user.username})
        )
        self.assertEqual(
            context['data']['preferences_api_url'],
            reverse('preferences_api', kwargs={'username': self.user.username})
        )
        self.assertEqual(
            context['data']['profile_image_upload_url'],
            reverse("profile_image_upload", kwargs={'username': self.user.username})
        )
        self.assertEqual(
            context['data']['profile_image_remove_url'],
            reverse('profile_image_remove', kwargs={'username': self.user.username})
        )
        self.assertEqual(
            context['data']['profile_image_max_bytes'],
            settings.PROFILE_IMAGE_MAX_BYTES
        )
        self.assertEqual(
            context['data']['profile_image_min_bytes'],
            settings.PROFILE_IMAGE_MIN_BYTES
        )
        self.assertEqual(context['data']['account_settings_page_url'], reverse('account_settings'))
        for attribute in self.CONTEXT_DATA:
            self.assertIn(attribute, context['data'])
    def test_view(self):
        """
        Verify learner profile page view.
        """
        profile_path = reverse('learner_profile', kwargs={'username': self.USERNAME})
        response = self.client.get(path=profile_path)
        for attribute in self.CONTEXT_DATA:
            self.assertIn(attribute, response.content)
    def test_undefined_profile_page(self):
        """
        Verify that a 404 is returned for a non-existent profile page.
        """
        profile_path = reverse('learner_profile', kwargs={'username': "no_such_user"})
        response = self.client.get(path=profile_path)
        self.assertEqual(404, response.status_code)
 | 
	agpl-3.0 | 4,813,397,828,913,565,000 | 31.718447 | 99 | 0.622255 | false | 
| 
	ProfessorX/Config | 
	.PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KShortcutWidget.py | 
	1 | 
	1269 | 
	# encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KShortcutWidget(__PyQt4_QtGui.QWidget):
    # no doc
    def applyStealShortcut(self, *args, **kwargs): # real signature unknown
        pass
    def clearShortcut(self, *args, **kwargs): # real signature unknown
        pass
    def isModifierlessAllowed(self, *args, **kwargs): # real signature unknown
        pass
    def setCheckActionCollections(self, *args, **kwargs): # real signature unknown
        pass
    def setCheckActionList(self, *args, **kwargs): # real signature unknown
        pass
    def setClearButtonsShown(self, *args, **kwargs): # real signature unknown
        pass
    def setModifierlessAllowed(self, *args, **kwargs): # real signature unknown
        pass
    def setShortcut(self, *args, **kwargs): # real signature unknown
        pass
    def shortcutChanged(self, *args, **kwargs): # real signature unknown
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
 | 
	gpl-2.0 | 8,233,919,471,791,282,000 | 26.586957 | 82 | 0.677699 | false | 
| 
	Stavitsky/nova | 
	nova/tests/unit/scheduler/test_scheduler_utils.py | 
	10 | 
	15657 | 
	# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""
Tests For Scheduler Utils
"""
import contextlib
import uuid
import mock
from mox3 import mox
from oslo_config import cfg
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
from nova import objects
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
CONF = cfg.CONF
class SchedulerUtilsTestCase(test.NoDBTestCase):
    """Test case for scheduler utils methods."""
    def setUp(self):
        super(SchedulerUtilsTestCase, self).setUp()
        self.context = 'fake-context'
    @mock.patch('nova.objects.Flavor.get_by_flavor_id')
    def test_build_request_spec_without_image(self, mock_get):
        image = None
        instance = {'uuid': 'fake-uuid'}
        instance_type = objects.Flavor(**test_flavor.fake_flavor)
        mock_get.return_value = objects.Flavor(extra_specs={})
        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
        self.mox.ReplayAll()
        request_spec = scheduler_utils.build_request_spec(self.context, image,
                                                          [instance])
        self.assertEqual({}, request_spec['image'])
    def test_build_request_spec_with_object(self):
        instance_type = objects.Flavor()
        instance = fake_instance.fake_instance_obj(self.context)
        with mock.patch.object(instance, 'get_flavor') as mock_get:
            mock_get.return_value = instance_type
            request_spec = scheduler_utils.build_request_spec(self.context,
                                                              None,
                                                              [instance])
            mock_get.assert_called_once_with()
        self.assertIsInstance(request_spec['instance_properties'], dict)
    @mock.patch.object(rpc, 'get_notifier', return_value=mock.Mock())
    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    @mock.patch.object(objects.Instance, 'save')
    def test_set_vm_state_and_notify(self, mock_save, mock_add, mock_get):
        expected_uuid = 'fake-uuid'
        request_spec = dict(instance_properties=dict(uuid='other-uuid'))
        updates = dict(vm_state='fake-vm-state')
        service = 'fake-service'
        method = 'fake-method'
        exc_info = 'exc_info'
        payload = dict(request_spec=request_spec,
                       instance_properties=request_spec.get(
                           'instance_properties', {}),
                       instance_id=expected_uuid,
                       state='fake-vm-state',
                       method=method,
                       reason=exc_info)
        event_type = '%s.%s' % (service, method)
        scheduler_utils.set_vm_state_and_notify(self.context,
                                                expected_uuid,
                                                service,
                                                method,
                                                updates,
                                                exc_info,
                                                request_spec,
                                                db)
        mock_save.assert_called_once_with()
        mock_add.assert_called_once_with(self.context, mock.ANY,
                                         exc_info, mock.ANY)
        self.assertIsInstance(mock_add.call_args[0][1], objects.Instance)
        self.assertIsInstance(mock_add.call_args[0][3], tuple)
        mock_get.return_value.error.assert_called_once_with(self.context,
                                                            event_type,
                                                            payload)
    def _test_populate_filter_props(self, host_state_obj=True,
                                    with_retry=True,
                                    force_hosts=None,
                                    force_nodes=None):
        if force_hosts is None:
            force_hosts = []
        if force_nodes is None:
            force_nodes = []
        if with_retry:
            if ((len(force_hosts) == 1 and len(force_nodes) <= 1)
                 or (len(force_nodes) == 1 and len(force_hosts) <= 1)):
                filter_properties = dict(force_hosts=force_hosts,
                                         force_nodes=force_nodes)
            elif len(force_hosts) > 1 or len(force_nodes) > 1:
                filter_properties = dict(retry=dict(hosts=[]),
                                         force_hosts=force_hosts,
                                         force_nodes=force_nodes)
            else:
                filter_properties = dict(retry=dict(hosts=[]))
        else:
            filter_properties = dict()
        if host_state_obj:
            class host_state(object):
                host = 'fake-host'
                nodename = 'fake-node'
                limits = 'fake-limits'
        else:
            host_state = dict(host='fake-host',
                              nodename='fake-node',
                              limits='fake-limits')
        scheduler_utils.populate_filter_properties(filter_properties,
                                                   host_state)
        enable_retry_force_hosts = not force_hosts or len(force_hosts) > 1
        enable_retry_force_nodes = not force_nodes or len(force_nodes) > 1
        if with_retry or enable_retry_force_hosts or enable_retry_force_nodes:
            # So we can check for 2 hosts
            scheduler_utils.populate_filter_properties(filter_properties,
                                                       host_state)
        if force_hosts:
            expected_limits = None
        else:
            expected_limits = 'fake-limits'
        self.assertEqual(expected_limits,
                         filter_properties.get('limits'))
        if (with_retry and enable_retry_force_hosts
                       and enable_retry_force_nodes):
            self.assertEqual([['fake-host', 'fake-node'],
                              ['fake-host', 'fake-node']],
                             filter_properties['retry']['hosts'])
        else:
            self.assertNotIn('retry', filter_properties)
    def test_populate_filter_props(self):
        self._test_populate_filter_props()
    def test_populate_filter_props_host_dict(self):
        self._test_populate_filter_props(host_state_obj=False)
    def test_populate_filter_props_no_retry(self):
        self._test_populate_filter_props(with_retry=False)
    def test_populate_filter_props_force_hosts_no_retry(self):
        self._test_populate_filter_props(force_hosts=['force-host'])
    def test_populate_filter_props_force_nodes_no_retry(self):
        self._test_populate_filter_props(force_nodes=['force-node'])
    def test_populate_filter_props_multi_force_hosts_with_retry(self):
        self._test_populate_filter_props(force_hosts=['force-host1',
                                                      'force-host2'])
    def test_populate_filter_props_multi_force_nodes_with_retry(self):
        self._test_populate_filter_props(force_nodes=['force-node1',
                                                      'force-node2'])
    @mock.patch.object(scheduler_utils, '_max_attempts')
    def test_populate_retry_exception_at_max_attempts(self, _max_attempts):
        _max_attempts.return_value = 2
        msg = 'The exception text was preserved!'
        filter_properties = dict(retry=dict(num_attempts=2, hosts=[],
                                            exc=[msg]))
        nvh = self.assertRaises(exception.NoValidHost,
                                scheduler_utils.populate_retry,
                                filter_properties, 'fake-uuid')
        # make sure 'msg' is a substring of the complete exception text
        self.assertIn(msg, nvh.message)
    def _check_parse_options(self, opts, sep, converter, expected):
        good = scheduler_utils.parse_options(opts,
                                             sep=sep,
                                             converter=converter)
        for item in expected:
            self.assertIn(item, good)
    def test_parse_options(self):
        # check normal
        self._check_parse_options(['foo=1', 'bar=-2.1'],
                                  '=',
                                  float,
                                  [('foo', 1.0), ('bar', -2.1)])
        # check convert error
        self._check_parse_options(['foo=a1', 'bar=-2.1'],
                                  '=',
                                  float,
                                  [('bar', -2.1)])
        # check separator missing
        self._check_parse_options(['foo', 'bar=-2.1'],
                                  '=',
                                  float,
                                  [('bar', -2.1)])
        # check key missing
        self._check_parse_options(['=5', 'bar=-2.1'],
                                  '=',
                                  float,
                                  [('bar', -2.1)])
    def test_validate_filters_configured(self):
        self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2')
        self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
        self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
        self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
    def _create_server_group(self, policy='anti-affinity'):
        instance = fake_instance.fake_instance_obj(self.context,
                params={'host': 'hostA'})
        group = objects.InstanceGroup()
        group.name = 'pele'
        group.uuid = str(uuid.uuid4())
        group.members = [instance.uuid]
        group.policies = [policy]
        return group
    def _get_group_details(self, group, policy=None):
        group_hosts = ['hostB']
        with contextlib.nested(
            mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
                              return_value=group),
            mock.patch.object(objects.InstanceGroup, 'get_hosts',
                              return_value=['hostA']),
        ) as (get_group, get_hosts):
            scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
            scheduler_utils._SUPPORTS_AFFINITY = None
            group_info = scheduler_utils._get_group_details(
                self.context, 'fake_uuid', group_hosts)
            self.assertEqual(
                (set(['hostA', 'hostB']), [policy]),
                group_info)
    def test_get_group_details(self):
        for policy in ['affinity', 'anti-affinity']:
            group = self._create_server_group(policy)
            self._get_group_details(group, policy=policy)
    def test_get_group_details_with_no_affinity_filters(self):
        self.flags(scheduler_default_filters=['fake'])
        scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
        scheduler_utils._SUPPORTS_AFFINITY = None
        group_info = scheduler_utils._get_group_details(self.context,
                                                        'fake-uuid')
        self.assertIsNone(group_info)
    def test_get_group_details_with_no_instance_uuid(self):
        self.flags(scheduler_default_filters=['fake'])
        scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
        scheduler_utils._SUPPORTS_AFFINITY = None
        group_info = scheduler_utils._get_group_details(self.context, None)
        self.assertIsNone(group_info)
    def _get_group_details_with_filter_not_configured(self, policy):
        wrong_filter = {
            'affinity': 'ServerGroupAntiAffinityFilter',
            'anti-affinity': 'ServerGroupAffinityFilter',
        }
        self.flags(scheduler_default_filters=[wrong_filter[policy]])
        instance = fake_instance.fake_instance_obj(self.context,
                params={'host': 'hostA'})
        group = objects.InstanceGroup()
        group.uuid = str(uuid.uuid4())
        group.members = [instance.uuid]
        group.policies = [policy]
        with contextlib.nested(
            mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
                              return_value=group),
            mock.patch.object(objects.InstanceGroup, 'get_hosts',
                              return_value=['hostA']),
        ) as (get_group, get_hosts):
            scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
            scheduler_utils._SUPPORTS_AFFINITY = None
            self.assertRaises(exception.UnsupportedPolicyException,
                              scheduler_utils._get_group_details,
                              self.context, 'fake-uuid')
    def test_get_group_details_with_filter_not_configured(self):
        policies = ['anti-affinity', 'affinity']
        for policy in policies:
            self._get_group_details_with_filter_not_configured(policy)
    @mock.patch.object(scheduler_utils, '_get_group_details')
    def test_setup_instance_group_in_filter_properties(self, mock_ggd):
        mock_ggd.return_value = scheduler_utils.GroupDetails(
            hosts=set(['hostA', 'hostB']), policies=['policy'])
        spec = {'instance_properties': {'uuid': 'fake-uuid'}}
        filter_props = {'group_hosts': ['hostC']}
        scheduler_utils.setup_instance_group(self.context, spec, filter_props)
        mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
                                         ['hostC'])
        expected_filter_props = {'group_updated': True,
                                 'group_hosts': set(['hostA', 'hostB']),
                                 'group_policies': ['policy']}
        self.assertEqual(expected_filter_props, filter_props)
    @mock.patch.object(scheduler_utils, '_get_group_details')
    def test_setup_instance_group_with_no_group(self, mock_ggd):
        mock_ggd.return_value = None
        spec = {'instance_properties': {'uuid': 'fake-uuid'}}
        filter_props = {'group_hosts': ['hostC']}
        scheduler_utils.setup_instance_group(self.context, spec, filter_props)
        mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
                                         ['hostC'])
        self.assertNotIn('group_updated', filter_props)
        self.assertNotIn('group_policies', filter_props)
        self.assertEqual(['hostC'], filter_props['group_hosts'])
    @mock.patch.object(scheduler_utils, '_get_group_details')
    def test_setup_instance_group_with_filter_not_configured(self, mock_ggd):
        mock_ggd.side_effect = exception.NoValidHost(reason='whatever')
        spec = {'instance_properties': {'uuid': 'fake-uuid'}}
        filter_props = {'group_hosts': ['hostC']}
        self.assertRaises(exception.NoValidHost,
                          scheduler_utils.setup_instance_group,
                          self.context, spec, filter_props)
 | 
	apache-2.0 | 3,205,757,569,836,244,000 | 43.104225 | 78 | 0.548445 | false | 
| 
	einaru/luma | 
	luma/plugins/browser/AddAttributeWizard.py | 
	3 | 
	8525 | 
	# -*- coding: utf-8 -*-
#
# Copyright (c) 2011
#     Per Ove Ringdal
#
# Copyright (C) 2004
#     Wido Depping, <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see http://www.gnu.org/licenses/
import os.path
import copy
import PyQt4
from PyQt4.QtCore import QString, pyqtSlot
from PyQt4.QtGui import QWizard
from .gui.AddAttributeWizardDesign import Ui_AddAttributeWizardDesign
from base.backend.ObjectClassAttributeInfo import ObjectClassAttributeInfo
from base.util.IconTheme import pixmapFromTheme
class AddAttributeWizard(QWizard, Ui_AddAttributeWizardDesign):
    """Wizard for adding a new attribute to an LDAP object.

    Page 0 lets the user pick an attribute; if the attribute is not already
    allowed by the object's current objectclasses, page 1 lets the user pick
    an additional objectclass that provides it (plus that class's MUST
    attributes).

    NOTE: this is legacy Python 2 / PyQt4 code (``map``/``filter`` returning
    lists, ``QString`` slots); keep that in mind when touching it.
    """

    def __init__(self, parent = None, flags = PyQt4.QtCore.Qt.Widget):
        QWizard.__init__(self, parent, flags)
        self.setupUi(self)
        # need to initialize the pages before connecting signals
        self.restart()
        attributePixmap = pixmapFromTheme(
            "addattribute", ":/icons/64/add-attribute")
        objectclassPixmap = pixmapFromTheme(
            "objectclass", ":/icons/64/objectclass")
        self.imageLabel.setPixmap(attributePixmap)
        self.objectclassLabel.setPixmap(objectclassPixmap)
        self.enableAllBox.toggled.connect(self.initAttributeBox)
        self.attributeBox.activated[str].connect(self.newSelection)
        self.classBox.itemSelectionChanged.connect(self.classSelection)
        # attribute values of the current ldap object
        self.OBJECTVALUES = None
        # schema information for the ldap server
        self.SCHEMAINFO = None
        # set of attributes which are possible with the current objectclasses
        self.possibleAttributes = None
        # set of all attributes which are supported by the server
        self.allPossibleAttributes = None

###############################################################################

    def setData(self, smartObject):
        """ Sets the current object data, schema information and initializes
        the attribute box and wizard buttons.

        smartObject -- the LDAP object the attribute will be added to.
        """
        self.smartObject = smartObject
        self.SCHEMAINFO = ObjectClassAttributeInfo(self.smartObject.getServerMeta())
        self.processData()
        self.initAttributeBox()
        currentPageWidget = self.page(0)
        #self.button(QWizard.FinishButton).setDisabled(False)
        #self.button(QWizard.NextButton).setDisabled(True)

###############################################################################

    def processData(self):
        """ Compute all attributes which can be added according to the data of
        the object. Single values which are already given are sorted out.

        Populates self.possibleAttributes (lower-cased) and
        self.allPossibleAttributes.
        """
        possibleMust, possibleMay = self.smartObject.getPossibleAttributes()
        # attributes used by the current objectClass
        #usedAttributes = set(objectAttributes).difference(set(['objectClass']))
        usedAttributes = self.smartObject.getAttributeList()
        # set of attribute which are used and have to be single
        singleAttributes = set(filter(self.SCHEMAINFO.isSingle, usedAttributes))
        # create a set of attributes which may be added
        self.possibleAttributes = (possibleMust.union(possibleMay)).difference(singleAttributes)
        # lower-case for the case-insensitive membership tests done later
        self.possibleAttributes = map(lambda x: x.lower(), self.possibleAttributes)
        # create a set of attributes which are supported by the server
        self.allPossibleAttributes = set(self.SCHEMAINFO.attributeDict.keys()).difference(singleAttributes)

###############################################################################

    def initAttributeBox(self):
        """Fill the attribute combo box, honouring the "show all" checkbox,
        and refresh the wizard-button state for the current selection."""
        self.attributeBox.clear()
        currentPageWidget = self.currentPage()
        showAll = self.enableAllBox.isChecked()
        currentPageWidget.setFinalPage(True)
        currentPageWidget.setCommitPage(False)
        #self.button(QWizard.FinishButton).setDisabled(False)
        tmpList = None
        if showAll:
            tmpList = copy.deepcopy(self.allPossibleAttributes)
        else:
            tmpList = copy.deepcopy(self.possibleAttributes)
        structuralClass = self.smartObject.getStructuralClasses()
        # only show attributes whose objectclass combinations don't violate
        # the objectclass chain (not two structural classes)
        if len(structuralClass) > 0:
            classList = filter(lambda x: not self.SCHEMAINFO.isStructural(x), self.SCHEMAINFO.getObjectClasses())
            for x in structuralClass:
                classList += self.SCHEMAINFO.getParents(x)
            for x in self.smartObject.getObjectClasses():
                if not (x in classList):
                    classList.append(x)
            mustAttributes, mayAttributes = self.SCHEMAINFO.getAllAttributes(classList)
            attributeList = mustAttributes.union(mayAttributes)
            # keep only attributes also present in tmpList (case-insensitive)
            cleanList = filter(lambda x: x.lower() in tmpList, attributeList)
            tmpList = cleanList
        else:
            # no structural class at all: force the "show all" mode
            self.enableAllBox.setChecked(True)
            self.enableAllBox.setEnabled(False)
            tmpList = sorted(self.allPossibleAttributes)
        tmpList.sort()
        # objectClass itself is managed elsewhere, never offered here
        tmpList = filter(lambda x: not (x.lower() == "objectclass"), tmpList)
        map(self.attributeBox.addItem, tmpList)
        self.newSelection(self.attributeBox.currentText())

###############################################################################

    # NOTE: an earlier, dead ``@pyqtSlot(int)`` stub named newSelection was
    # removed here.  Python keeps only the last ``def`` bound to a name in a
    # class body, so the stub was silently discarded and its int-slot
    # registration never took effect.
    @pyqtSlot("QString")
    def newSelection(self, attribute):
        """Enable/disable Finish and Next depending on whether *attribute*
        can be added directly or requires an extra objectclass (page 1)."""
        attribute = str(attribute).lower()
        currentPageWidget = self.currentPage()
        mustSet, maySet = self.SCHEMAINFO.getAllObjectclassesForAttr(attribute)
        tmpSet = mustSet.union(maySet)
        if (attribute in self.possibleAttributes) or (len(tmpSet) == 0):
            # directly addable (or unknown to the schema): can finish here
            currentPageWidget.setFinalPage(True)
            #self.button(QWizard.FinishButton).setDisabled(False)
            self.button(QWizard.NextButton).setDisabled(True)
        else:
            # needs an additional objectclass: force the user to page 1
            currentPageWidget.setFinalPage(False)
            #self.button(QWizard.FinishButton).setDisabled(True)
            self.button(QWizard.NextButton).setDisabled(False)

###############################################################################

    def initClassPage(self):
        """Fill the objectclass list on page 1 with classes that provide the
        chosen attribute without breaking the structural-class chain."""
        currentPageWidget = self.currentPage()
        #self.button(QWizard.FinishButton).setDisabled(True)
        self.classBox.clear()
        self.mustAttributeBox.clear()
        attribute = str(self.attributeBox.currentText())
        mustSet, maySet = self.SCHEMAINFO.getAllObjectclassesForAttr(attribute)
        classList = mustSet.union(maySet)
        if self.smartObject.hasStructuralClass():
            # keep auxiliary classes; structural ones only if they share a
            # chain with one of the object's existing classes
            structList = filter(lambda x: self.SCHEMAINFO.isStructural(x), classList)
            classList = filter(lambda x: not self.SCHEMAINFO.isStructural(x), classList)
            for x in structList:
                for y in self.smartObject.getObjectClasses():
                    if self.SCHEMAINFO.sameObjectClassChain(x, y):
                        classList.append(x)
        else:
            classList = sorted(classList)
        classList.sort()
        map(self.classBox.addItem, classList)
        self.classBox.setCurrentRow(0)

###############################################################################

    def classSelection(self):
        """Show the MUST attributes of the objectclass selected on page 1
        (minus the attribute the user is already adding)."""
        self.mustAttributeBox.clear()
        objectclass = str(self.classBox.currentItem().text())
        mustAttributes = self.SCHEMAINFO.getAllMusts([objectclass])
        attribute = set([str(self.attributeBox.currentText())])
        map(self.mustAttributeBox.addItem, mustAttributes.difference(attribute))
        currentPageWidget = self.currentPage()
        #self.button(QWizard.FinishButton).setDisabled(False)

###############################################################################

    def initializePage(self, id):
        """QWizard hook: prepare page *id* just before it is shown."""
        if id == 1:
            self.initClassPage()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
 | 
	gpl-2.0 | -6,101,912,037,820,293,000 | 36.888889 | 113 | 0.637654 | false | 
| 
	SyndicateLtd/SyndicateQT | 
	test/functional/wallet_zapwallettxes.py | 
	3 | 
	2839 | 
	#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the zapwallettxes functionality.
- start two bitcoind nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
  transactions are still available.
- restart node 0 with zapwallettxes and persistmempool, and verify that both
  the confirmed and the unconfirmed transactions are still available.
- restart node 0 with just zapwallettxes and verify that the confirmed
  transactions are still available, but that the unconfirmed transaction has
  been zapped.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
    assert_raises_rpc_error,
    wait_until,
)
class ZapWalletTXesTest (BitcoinTestFramework):
    """Check -zapwallettxes: confirmed wallet transactions survive a zap,
    unconfirmed ones are removed."""

    def set_test_params(self):
        # Two nodes, starting from an empty chain.
        self.setup_clean_chain = True
        self.num_nodes = 2

    def run_test(self):
        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 250)

        # First spend: gets mined, so it ends up confirmed.
        confirmed_txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        # Second spend: left in the mempool, i.e. unconfirmed.
        unconfirmed_txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)

        # Both transactions are currently known to the wallet.
        for txid in (confirmed_txid, unconfirmed_txid):
            assert_equal(self.nodes[0].gettransaction(txid)['txid'], txid)

        # An ordinary restart keeps both of them.
        self.stop_node(0)
        self.start_node(0)
        for txid in (confirmed_txid, unconfirmed_txid):
            assert_equal(self.nodes[0].gettransaction(txid)['txid'], txid)

        # Restart with -zapwallettxes=2: all wallet transactions are wiped
        # and only confirmed ones are recovered from the chain.
        self.stop_node(0)
        self.start_node(0, ["-zapwallettxes=2"])

        # The confirmed transaction was recovered ...
        assert_equal(self.nodes[0].gettransaction(confirmed_txid)['txid'],
                     confirmed_txid)

        # ... while the unconfirmed one has been zapped for good.
        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id',
                                self.nodes[0].gettransaction, unconfirmed_txid)
# Allow running this test directly from the command line.
if __name__ == '__main__':
    ZapWalletTXesTest().main()
 | 
	mit | 8,850,313,853,761,733,000 | 39.557143 | 112 | 0.697076 | false | 
| 
	ema/conpaas | 
	conpaas-services/src/conpaas/services/htc/manager/get_run_time.py | 
	2 | 
	6391 | 
	#import os
import sys
import time
import xmltodict
import pprint 
# Shared pretty-printer for all debug output; writes to stderr so it does
# not mix with the script's normal stdout output.
pp = pprint.PrettyPrinter(indent=4,stream=sys.stderr)
# Module-level debug switch; flipped to True when run as a script (__main__).
testing = False
# def poll_condor(jonbr, bagnr):
def poll_condor(filename):
    """Parse the condor_history XML dump in *filename* and return the task
    dict built by get_poll_dict().

    Retries up to 4 times (sleeping 2s between tries) while the file does
    not yet contain the expected <classads><c><a> structure; returns {} if
    it never appears.  NOTE: Python 2 code (print-chevron, has_key).
    """
    # filename = "hist-%d-%d.xml" % ( jobnr, bagnr )
    # command = "condor_history -constraint 'HtcJob == %d && HtcBag == %d' -xml > %s" % ( jobnr, bagnr, filename )
    # os.system( command )
    tries = 0
    poll_dict = {}
    # Re-read and re-parse on every try: the history file is written
    # incrementally, so early reads may be empty or partial.
    while tries < 4:
        tries += 1
        _trystr = "Try %d (%s) :" % (tries, filename)
        xml = open(filename).read()
        xmldict = xmltodict.parse(xml)
        print >> sys.stderr, "type(xmldict) = ", type(xmldict)
        # The three checks below validate one nesting level each.
        if not ( type(xmldict) == dict and xmldict.has_key('classads') ):
            print >> sys.stderr, _trystr, "No classads, wait a little until the first results come in"
            time.sleep(2)
            continue
        print >> sys.stderr, "type(xmldict['classads']) = ", type(xmldict['classads'])
        if not ( type(xmldict['classads']) == dict and xmldict['classads'].has_key('c') ) :
            print >> sys.stderr, _trystr, "No classads <c> entries, wait a little until the first results come in"
            time.sleep(2)
            continue
        print >> sys.stderr, "type(xmldict['classads']['c']) = ", type(xmldict['classads']['c'])
        if not ( type(xmldict['classads']['c']) == list and xmldict['classads']['c'][0].has_key('a') ) :
            print >> sys.stderr, _trystr, "No classads attributes, wait a little until the first results come in"
            time.sleep(2)
            continue
        poll_dict = get_poll_dict(xmldict)
        break
        # if poll_dict['CompletedTasks'] == poll_dict['TotalTask']:
    #pp.pprint(xmldict)
    return poll_dict
def get_poll_dict(xmldict):
        """Convert a parsed condor_history classads dict into
        { "job.bag.task": [ {attr: value, ...}, ... ] }.

        Each <a n="..."> element is decoded by its value tag (<r> float,
        <i> int, <s> string, <b> boolean, <e> expression) and only a fixed
        whitelist of attributes is kept per classad.

        NOTE(review): the ``if testing:`` branch below references
        ``filename``, ``jobnr`` and ``bagnr``, which are only defined in the
        ``__main__`` block — calling this with testing=True from poll_condor
        would raise NameError.  Confirm before enabling that path.
        """
        if testing:
            print >> sys.stderr, "selecting info from file %s, job %s, bag %s" % (filename, jobnr, bagnr)
        res_dict = {}
        # print >> sys.stderr, xml
        # print "----"
        # jobid = 0
        for c in xmldict['classads']['c']:
                tempdict = {}
                # pp.pprint(c)
                attrs=c['a']
                # pp.pprint(attrs)
                for d in attrs:
                        v = None
                        k = d['@n'].encode('ascii', 'ignore')   # get rid of unicode from xmltodict
                        # handle float
                        if d.has_key('r'):
                                v=float( d['r'].encode('ascii', 'ignore') )      # get rid of unicode from xmltodict
                        # handle int
                        if d.has_key('i'):
                                v=int( d['i'].encode('ascii', 'ignore') )      # get rid of unicode from xmltodict
                        # handle string
                        if d.has_key('s'):
                                # pp.pprint(d)
                                if d['s'] == None:
                                        v = 'None'
                                else:
                                        v= d['s'].encode('ascii', 'ignore')      # get rid of unicode from xmltodict
                        # handle boolean
                        if d.has_key('b'):
                                # pp.pprint(d)
                                v= 'True' if d['b']['@v'] == 't' else 'False'
                        # handle expression
                        if d.has_key('e'):
                                v= d['e'].encode('ascii', 'ignore')       # get rid of unicode from xmltodict
                        if v != None:
                                tempdict[k] = v
                        else:
                                print "unknown datatype in "
                                pp.pprint(d)
                # keep only the attributes we care about
                attrdict = {}
                for k in [ 'HtcJob', 'HtcBag', 'HtcTask', 
                    'RemoteWallClockTime', 'Cmd', 
                    'MATCH_EXP_MachineCloudMachineType' ]:
                        if tempdict.has_key(k):
                            attrdict[k] = tempdict[k]
                #print kl
                # cur_jobnr = "%(HtcJob)s" % tempdict
                # if not ( jobnr == None or jobnr == cur_jobnr):
                #         continue
                # cur_bagnr = "%(HtcBag)s" % tempdict
                # if not ( bagnr == None or bagnr == cur_bagnr):
                #         continue
                # tasknr = "%(HtcTask)s" % taskdict
                # group all classads under one job.bag.task identifier
                taskid = "%(HtcJob)s.%(HtcBag)s.%(HtcTask)s" % tempdict
                #jobid += 1
                # print "----"
                if res_dict.has_key(taskid):
                        res_dict[taskid].append ( attrdict )
                else:
                        res_dict[taskid] = [ attrdict ]
        if testing:
            print >> sys.stderr, "====== res_dict ======"
            pp.pprint(res_dict) 
            print >> sys.stderr, "------ res_dict ------"
        return res_dict
"""
{   'tasks':
    {  
        taskid: 
        [
            {
                attr1: val1,
                attrn: valn,
            },
            {
                attr1: val1,
                attrn: valn,
            }
        ]
    }
}
"""
def do_test(filename):
        """Poll *filename* and report to stderr how many completed tasks
        (classads) and distinct task ids were found."""
        poll_dict = poll_condor(filename)
        # total number of classads across all task ids
        completed_tasks = 0
        for _ in poll_dict.keys():
            completed_tasks += len(poll_dict[_])
        # number of distinct job.bag.task identifiers
        completed_task_sets = poll_dict.keys().__len__()
        print >> sys.stderr, "Found %d completed tasks in %d sets" % (completed_tasks, completed_task_sets)
        # flip to True to dump the full result for debugging
        if False:
            pp.pprint(poll_dict)
if __name__ == "__main__":
        pp = pprint.PrettyPrinter(indent=4,stream=sys.stderr)
        testing = True
        usage = "usage : %s ClassAd_XML_file [ jobnr [ bagnr ] ]" % sys.argv[0]
        argc = len(sys.argv)
        jobnr = None
        bagnr = None
        print "%d args" % argc
        if argc <= 1:
                print usage
                filename = "test3.xml"
        if argc >= 2:
                filename = sys.argv[1]
                print "file = %s" % filename
        if argc >= 3:
                jobnr = sys.argv[2]
                print "job = %s" % jobnr
        if argc >= 4:
                bagnr = sys.argv[3]
                print "bag = %s" % bagnr
        for _ in [ "test1.xml", "test2.xml", "test3.xml", "test4.xml" ] :
            do_test( _ )
 | 
	bsd-3-clause | -5,384,936,284,092,089,000 | 35.942197 | 116 | 0.434517 | false | 
| 
	leiferikb/bitpop | 
	depot_tools/third_party/boto/mashups/interactive.py | 
	119 | 
	2737 | 
	# Copyright (C) 2003-2007  Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
import socket
import sys
# windows does not have termios...
# Probe for POSIX terminal support at import time; has_termios drives the
# posix/windows dispatch in interactive_shell().
try:
    import termios
    import tty
    has_termios = True
except ImportError:
    has_termios = False
def interactive_shell(chan):
    """Run an interactive session over *chan*, choosing the shell loop
    that matches the local platform's terminal capabilities."""
    # POSIX terminals (termios importable) get the raw-tty select() loop;
    # everything else falls back to the line-buffered thread-based loop.
    shell = posix_shell if has_termios else windows_shell
    shell(chan)
def posix_shell(chan):
    """Bridge the local raw tty and the remote channel *chan* until EOF,
    using select() to multiplex both directions.  Python 2 code.
    """
    import select
    
    # Remember the terminal state so it can be restored on exit.
    oldtty = termios.tcgetattr(sys.stdin)
    try:
        # Raw/cbreak mode: forward every keystroke immediately, unbuffered.
        tty.setraw(sys.stdin.fileno())
        tty.setcbreak(sys.stdin.fileno())
        chan.settimeout(0.0)
        while True:
            # Block until either the channel or local stdin is readable.
            r, w, e = select.select([chan, sys.stdin], [], [])
            if chan in r:
                try:
                    x = chan.recv(1024)
                    # Empty read means the remote side closed the channel.
                    if len(x) == 0:
                        print '\r\n*** EOF\r\n',
                        break
                    sys.stdout.write(x)
                    sys.stdout.flush()
                except socket.timeout:
                    # non-blocking channel had nothing after all
                    pass
            if sys.stdin in r:
                # Forward one local keystroke; empty read = local EOF.
                x = sys.stdin.read(1)
                if len(x) == 0:
                    break
                chan.send(x)
    finally:
        # Always restore the terminal, even on exceptions.
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
    
# thanks to Mike Looijmans for this code
def windows_shell(chan):
    """Line-buffered fallback shell for platforms without termios: a
    background thread prints remote output while the main thread forwards
    local keystrokes to *chan*."""
    import threading

    sys.stdout.write("Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n")
        
    def writeall(sock):
        # Drain remote output to stdout until the channel closes.
        while True:
            data = sock.recv(256)
            if not data:
                sys.stdout.write('\r\n*** EOF ***\r\n\r\n')
                sys.stdout.flush()
                break
            sys.stdout.write(data)
            sys.stdout.flush()
        
    writer = threading.Thread(target=writeall, args=(chan,))
    writer.start()
        
    try:
        # Forward local input one character at a time; empty read = EOF.
        while True:
            d = sys.stdin.read(1)
            if not d:
                break
            chan.send(d)
    except EOFError:
        # user hit ^Z or F6
        pass
 | 
	gpl-3.0 | 4,704,446,714,513,671,000 | 27.216495 | 93 | 0.565583 | false | 
| 
	eeshangarg/zulip | 
	zilencer/management/commands/add_new_realm.py | 
	6 | 
	1137 | 
	from typing import Any
from zerver.lib.actions import bulk_add_subscriptions, do_create_realm, do_create_user
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.onboarding import send_initial_realm_messages
from zerver.models import Realm, UserProfile
class Command(ZulipBaseCommand):
    """Management command that spins up a throwaway realm with one
    realm-administrator user, for manually exercising onboarding."""

    help = """Add a new realm and initial user for manual testing of the onboarding process."""

    def handle(self, **options: Any) -> None:
        # Derive the next free subdomain from the count of existing
        # "realmNN" realms.
        realm_count = Realm.objects.filter(string_id__startswith="realm").count()
        string_id = f"realm{realm_count:02}"
        realm = do_create_realm(string_id, string_id)

        # Likewise derive a unique short name for the initial user.
        user_count = UserProfile.objects.filter(email__contains="user@").count()
        name = f"{user_count:02}-user"
        user = do_create_user(
            f"{name}@{string_id}.zulip.com",
            "password",
            realm,
            name,
            role=UserProfile.ROLE_REALM_ADMINISTRATOR,
            acting_user=None,
        )

        # do_create_realm always creates this stream; guard for type-checking.
        assert realm.signup_notifications_stream is not None
        bulk_add_subscriptions(
            realm, [realm.signup_notifications_stream], [user], acting_user=None
        )
        send_initial_realm_messages(realm)
 | 
	apache-2.0 | 8,829,127,599,415,805,000 | 39.607143 | 100 | 0.671944 | false | 
| 
	jbzdak/edx-platform | 
	lms/djangoapps/shoppingcart/processors/tests/test_CyberSource2.py | 
	164 | 
	18446 | 
	# -*- coding: utf-8 -*-
"""
Tests for the newer CyberSource API implementation.
"""
from mock import patch
from django.test import TestCase
from django.conf import settings
import ddt
from student.tests.factories import UserFactory
from shoppingcart.models import Order, OrderItem
from shoppingcart.processors.CyberSource2 import (
    processor_hash,
    process_postpay_callback,
    render_purchase_form_html,
    get_signed_purchase_params,
    _get_processor_exception_html
)
from shoppingcart.processors.exceptions import (
    CCProcessorSignatureException,
    CCProcessorDataException,
    CCProcessorWrongAmountException
)
@ddt.ddt
class CyberSource2Test(TestCase):
    """
    Test the CyberSource API implementation.  As much as possible,
    this test case should use ONLY the public processor interface
    (defined in shoppingcart.processors.__init__.py).
    Some of the tests in this suite rely on Django settings
    to be configured a certain way.
    """
    COST = "10.00"
    CALLBACK_URL = "/test_callback_url"
    FAILED_DECISIONS = ["DECLINE", "CANCEL", "ERROR"]
    def setUp(self):
        """ Create a user and an order. 

        Every test starts from one cart (self.order) containing a single
        OrderItem priced at self.COST.
        """
        super(CyberSource2Test, self).setUp()
        self.user = UserFactory()
        self.order = Order.get_cart_for_user(self.user)
        self.order_item = OrderItem.objects.create(
            order=self.order,
            user=self.user,
            unit_cost=self.COST,
            line_cost=self.COST
        )
    def assert_dump_recorded(self, order):
        """
        Verify that this order does have a dump of information from the
        payment processor.

        Only checks that the dump is non-empty, not its contents.
        """
        self.assertNotEqual(order.processor_reply_dump, '')
    def test_render_purchase_form_html(self):
        """The rendered purchase form posts to the configured payment URL
        and carries the uuid/signature fields and the callback URL."""
        # Verify that the HTML form renders with the payment URL specified
        # in the test settings.
        # This does NOT test that all the form parameters are correct;
        # we verify that by testing `get_signed_purchase_params()` directly.
        html = render_purchase_form_html(self.order, callback_url=self.CALLBACK_URL)
        self.assertIn('<form action="/shoppingcart/payment_fake" method="post">', html)
        self.assertIn('transaction_uuid', html)
        self.assertIn('signature', html)
        self.assertIn(self.CALLBACK_URL, html)
    def test_get_signed_purchase_params(self):
        """Exhaustively check every parameter produced by
        get_signed_purchase_params(), including the HMAC signature."""
        params = get_signed_purchase_params(self.order, callback_url=self.CALLBACK_URL)
        # Check the callback URL override
        self.assertEqual(params['override_custom_receipt_page'], self.CALLBACK_URL)
        # Parameters determined by the order model
        self.assertEqual(params['amount'], '10.00')
        self.assertEqual(params['currency'], 'usd')
        self.assertEqual(params['orderNumber'], 'OrderId: {order_id}'.format(order_id=self.order.id))
        self.assertEqual(params['reference_number'], self.order.id)
        # Parameters determined by the Django (test) settings
        self.assertEqual(params['access_key'], '0123456789012345678901')
        self.assertEqual(params['profile_id'], 'edx')
        # Some fields will change depending on when the test runs,
        # so we just check that they're set to a non-empty string
        self.assertGreater(len(params['signed_date_time']), 0)
        self.assertGreater(len(params['transaction_uuid']), 0)
        # Constant parameters
        self.assertEqual(params['transaction_type'], 'sale')
        self.assertEqual(params['locale'], 'en')
        self.assertEqual(params['payment_method'], 'card')
        # The exact ordered list of signed fields matters: CyberSource
        # validates the signature over precisely these names.
        self.assertEqual(
            params['signed_field_names'],
            ",".join([
                'amount',
                'currency',
                'orderNumber',
                'access_key',
                'profile_id',
                'reference_number',
                'transaction_type',
                'locale',
                'signed_date_time',
                'signed_field_names',
                'unsigned_field_names',
                'transaction_uuid',
                'payment_method',
                'override_custom_receipt_page',
                'override_custom_cancel_page',
            ])
        )
        self.assertEqual(params['unsigned_field_names'], '')
        # Check the signature
        self.assertEqual(params['signature'], self._signature(params))
    # We patch the purchased callback because
    # we're using the OrderItem base class, which throws an exception
    # when item doest not have a course id associated
    @patch.object(OrderItem, 'purchased_callback')
    def test_process_payment_raises_exception(self, purchased_callback):  # pylint: disable=unused-argument
        """process_postpay_callback() on a fresh order item must complete
        without raising (the patched callback avoids the base-class
        no-course-id exception)."""
        self.order.clear()
        OrderItem.objects.create(
            order=self.order,
            user=self.user,
            unit_cost=self.COST,
            line_cost=self.COST,
        )
        params = self._signed_callback_params(self.order.id, self.COST, self.COST)
        process_postpay_callback(params)
    # We patch the purchased callback because
    # (a) we're using the OrderItem base class, which doesn't implement this method, and
    # (b) we want to verify that the method gets called on success.
    @patch.object(OrderItem, 'purchased_callback')
    @patch.object(OrderItem, 'pdf_receipt_display_name')
    def test_process_payment_success(self, pdf_receipt_display_name, purchased_callback):  # pylint: disable=unused-argument
        """Happy path: an ACCEPT callback marks the order purchased, fires
        the item callback, and records the processor reply dump."""
        # Simulate a callback from CyberSource indicating that payment was successful
        params = self._signed_callback_params(self.order.id, self.COST, self.COST)
        result = process_postpay_callback(params)
        # Expect that we processed the payment successfully
        self.assertTrue(
            result['success'],
            msg="Payment was not successful: {error}".format(error=result.get('error_html'))
        )
        self.assertEqual(result['error_html'], '')
        # Expect that the item's purchased callback was invoked
        purchased_callback.assert_called_with()
        # Expect that the order has been marked as purchased
        self.assertEqual(result['order'].status, 'purchased')
        self.assert_dump_recorded(result['order'])
    def test_process_payment_rejected(self):
        """A REJECT decision yields success=False with a user-facing error,
        but the processor reply is still recorded on the order."""
        # Simulate a callback from CyberSource indicating that the payment was rejected
        params = self._signed_callback_params(self.order.id, self.COST, self.COST, decision='REJECT')
        result = process_postpay_callback(params)
        # Expect that we get an error message
        self.assertFalse(result['success'])
        self.assertIn(u"did not accept your payment", result['error_html'])
        self.assert_dump_recorded(result['order'])
    def test_process_payment_invalid_signature(self):
        """A callback with a bad signature must be rejected with the
        corrupted-message error (no order state is trusted)."""
        # Simulate a callback from CyberSource indicating that the payment was rejected
        params = self._signed_callback_params(self.order.id, self.COST, self.COST, signature="invalid!")
        result = process_postpay_callback(params)
        # Expect that we get an error message
        self.assertFalse(result['success'])
        self.assertIn(u"corrupted message regarding your charge", result['error_html'])
    def test_process_payment_invalid_order(self):
        """A callback referencing a non-existent order id fails with the
        inconsistent-data error."""
        # Use an invalid order ID
        params = self._signed_callback_params("98272", self.COST, self.COST)
        result = process_postpay_callback(params)
        # Expect an error
        self.assertFalse(result['success'])
        self.assertIn(u"inconsistent data", result['error_html'])
    def test_process_invalid_payment_amount(self):
        """A paid amount that disagrees with the order total is rejected,
        yet the processor reply dump is still recorded."""
        # Change the payment amount (no longer matches the database order record)
        params = self._signed_callback_params(self.order.id, "145.00", "145.00")
        result = process_postpay_callback(params)
        # Expect an error
        self.assertFalse(result['success'])
        self.assertIn(u"different amount than the order total", result['error_html'])
        # refresh data for current order
        order = Order.objects.get(id=self.order.id)
        self.assert_dump_recorded(order)
    def test_process_amount_paid_not_decimal(self):
        """A non-numeric paid amount triggers the badly-typed-value error."""
        # Change the payment amount to a non-decimal
        params = self._signed_callback_params(self.order.id, self.COST, "abcd")
        result = process_postpay_callback(params)
        # Expect an error
        self.assertFalse(result['success'])
        self.assertIn(u"badly-typed value", result['error_html'])
    def test_process_user_cancelled(self):
        """A CANCEL decision surfaces the user-cancelled error message
        (checked before the bad amount would be parsed)."""
        # Change the payment amount to a non-decimal
        params = self._signed_callback_params(self.order.id, self.COST, "abcd")
        params['decision'] = u'CANCEL'
        result = process_postpay_callback(params)
        # Expect an error
        self.assertFalse(result['success'])
        self.assertIn(u"you have cancelled this transaction", result['error_html'])
    @patch.object(OrderItem, 'purchased_callback')
    @patch.object(OrderItem, 'pdf_receipt_display_name')
    def test_process_no_credit_card_digits(self, pdf_receipt_display_name, purchased_callback):  # pylint: disable=unused-argument
        """A card number without digits still processes successfully; the
        order stores '####' as the placeholder last-four digits."""
        # Use a credit card number with no digits provided
        params = self._signed_callback_params(
            self.order.id, self.COST, self.COST,
            card_number='nodigits'
        )
        result = process_postpay_callback(params)
        # Expect that we processed the payment successfully
        self.assertTrue(
            result['success'],
            msg="Payment was not successful: {error}".format(error=result.get('error_html'))
        )
        self.assertEqual(result['error_html'], '')
        self.assert_dump_recorded(result['order'])
        # Expect that the order has placeholders for the missing credit card digits
        self.assertEqual(result['order'].bill_to_ccnum, '####')
    @ddt.data('req_reference_number', 'req_currency', 'decision', 'auth_amount')
    def test_process_missing_parameters(self, missing_param):
        """Dropping any one required callback parameter (ddt-parameterized)
        produces the missing-parameter error."""
        # Remove a required parameter
        params = self._signed_callback_params(self.order.id, self.COST, self.COST)
        del params[missing_param]
        # Recalculate the signature with no signed fields so we can get past
        # signature validation.
        params['signed_field_names'] = 'reason_code,message'
        params['signature'] = self._signature(params)
        result = process_postpay_callback(params)
        # Expect an error
        self.assertFalse(result['success'])
        self.assertIn(u"did not return a required parameter", result['error_html'])
    @patch.object(OrderItem, 'purchased_callback')
    @patch.object(OrderItem, 'pdf_receipt_display_name')
    def test_sign_then_verify_unicode(self, pdf_receipt_display_name, purchased_callback):  # pylint: disable=unused-argument
        """Signing and verifying params containing non-ASCII text must not raise UnicodeError."""
        params = self._signed_callback_params(
            self.order.id, self.COST, self.COST,
            first_name=u'\u2699'
        )
        # Verify that this executes without a unicode error
        result = process_postpay_callback(params)
        self.assertTrue(result['success'])
        self.assert_dump_recorded(result['order'])
    @ddt.data('string', u'üñîçø∂é')
    def test_get_processor_exception_html(self, error_string):
        """
        Verify the error page HTML produced for each processor exception type.
        """
        exception_classes = (
            CCProcessorSignatureException,
            CCProcessorWrongAmountException,
            CCProcessorDataException,
        )
        for exception_class in exception_classes:
            # Each exception's message must survive into the rendered HTML,
            # alongside the support e-mail and the standard apology text.
            html = _get_processor_exception_html(exception_class(error_string))
            self.assertIn(settings.PAYMENT_SUPPORT_EMAIL, html)
            self.assertIn('Sorry!', html)
            self.assertIn(error_string, html)
    def _signed_callback_params(
        self, order_id, order_amount, paid_amount,
        decision='ACCEPT', signature=None, card_number='xxxxxxxxxxxx1111',
        first_name='John'
    ):
        """
        Construct parameters that could be returned from CyberSource
        to our payment callback.
        Some values can be overridden to simulate different test scenarios,
        but most are fake values captured from interactions with
        a CyberSource test account.
        Args:
            order_id (string or int): The ID of the `Order` model.
            order_amount (string): The cost of the order.
            paid_amount (string): The amount the user paid using CyberSource.
        Keyword Args:
            decision (string): Whether the payment was accepted or rejected or declined.
            signature (string): If provided, use this value instead of calculating the signature.
            card_number (string): If provided, use this value instead of the default credit card number.
            first_name (string): If provided, the first name of the user.
        Returns:
            dict
        """
        # Parameters sent from CyberSource to our callback implementation
        # These were captured from the CC test server.
        signed_field_names = ["transaction_id",
                              "decision",
                              "req_access_key",
                              "req_profile_id",
                              "req_transaction_uuid",
                              "req_transaction_type",
                              "req_reference_number",
                              "req_amount",
                              "req_currency",
                              "req_locale",
                              "req_payment_method",
                              "req_override_custom_receipt_page",
                              "req_bill_to_forename",
                              "req_bill_to_surname",
                              "req_bill_to_email",
                              "req_bill_to_address_line1",
                              "req_bill_to_address_city",
                              "req_bill_to_address_state",
                              "req_bill_to_address_country",
                              "req_bill_to_address_postal_code",
                              "req_card_number",
                              "req_card_type",
                              "req_card_expiry_date",
                              "message",
                              "reason_code",
                              "auth_avs_code",
                              "auth_avs_code_raw",
                              "auth_response",
                              "auth_amount",
                              "auth_code",
                              "auth_trans_ref_no",
                              "auth_time",
                              "bill_trans_ref_no",
                              "signed_field_names",
                              "signed_date_time"]
        # if decision is in FAILED_DECISIONS list then remove  auth_amount from
        # signed_field_names list.
        if decision in self.FAILED_DECISIONS:
            signed_field_names.remove("auth_amount")
        params = {
            # Parameters that change based on the test
            "decision": decision,
            "req_reference_number": str(order_id),
            "req_amount": order_amount,
            "auth_amount": paid_amount,
            "req_card_number": card_number,
            # Stub values
            "utf8": u"✓",
            "req_bill_to_address_country": "US",
            "auth_avs_code": "X",
            "req_card_expiry_date": "01-2018",
            "bill_trans_ref_no": "85080648RYI23S6I",
            "req_bill_to_address_state": "MA",
            "signed_field_names": ",".join(signed_field_names),
            "req_payment_method": "card",
            "req_transaction_type": "sale",
            "auth_code": "888888",
            "req_locale": "en",
            "reason_code": "100",
            "req_bill_to_address_postal_code": "02139",
            "req_bill_to_address_line1": "123 Fake Street",
            "req_card_type": "001",
            "req_bill_to_address_city": "Boston",
            "signed_date_time": "2014-08-18T14:07:10Z",
            "req_currency": "usd",
            "auth_avs_code_raw": "I1",
            "transaction_id": "4083708299660176195663",
            "auth_time": "2014-08-18T140710Z",
            "message": "Request was processed successfully.",
            "auth_response": "100",
            "req_profile_id": "0000001",
            "req_transaction_uuid": "ddd9935b82dd403f9aa4ba6ecf021b1f",
            "auth_trans_ref_no": "85080648RYI23S6I",
            "req_bill_to_surname": "Doe",
            "req_bill_to_forename": first_name,
            "req_bill_to_email": "[email protected]",
            "req_override_custom_receipt_page": "http://localhost:8000/shoppingcart/postpay_callback/",
            "req_access_key": "abcd12345",
        }
        # if decision is in FAILED_DECISIONS list then remove the auth_amount from params dict
        if decision in self.FAILED_DECISIONS:
            del params["auth_amount"]
        # Calculate the signature
        params['signature'] = signature if signature is not None else self._signature(params)
        return params
    def _signature(self, params):
        """
        Compute the CyberSource signature for `params`.
        The fields named in `params['signed_field_names']` (a comma-separated
        string) are serialized as ``name=value`` pairs, joined with commas,
        and hashed with the payment processor's own hashing helper.
        NOTE: This method uses the processor's hashing method.  That method
        is a thin wrapper of standard library calls, and it seemed overly complex
        to rewrite that code in the test suite.
        Args:
            params (dict): Dictionary with a key 'signed_field_names',
                which is a comma-separated list of keys in the dictionary
                to include in the signature.
        Returns:
            string
        """
        field_names = params['signed_field_names'].split(u",")
        serialized_pairs = [
            u"{0}={1}".format(name, params[name]) for name in field_names
        ]
        return processor_hash(u",".join(serialized_pairs))
    def test_process_payment_declined(self):
        """A DECLINE decision from CyberSource should yield a failure result."""
        declined_params = self._signed_callback_params(
            self.order.id, self.COST, self.COST, decision='DECLINE'
        )
        result = process_postpay_callback(declined_params)
        # The callback must report failure with a user-facing explanation.
        self.assertFalse(result['success'])
        self.assertIn(u"payment was declined", result['error_html'])
 | 
	agpl-3.0 | 8,347,593,669,894,794,000 | 40.804989 | 130 | 0.599371 | false | 
| 
	SOKP/kernel_motorola_msm8226 | 
	tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 
	12980 | 
	5411 | 
	# SchedGui.py - Python extension for perf script, basic GUI code for
#		traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
	import wx
except ImportError:
	raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
	"""Top-level wxPython window that draws a scheduler trace as horizontal
	lanes of rectangles, with scrolling, zooming and click-to-inspect."""

	# Layout constants, all in pixels.
	Y_OFFSET = 100
	RECT_HEIGHT = 100
	RECT_SPACE = 50
	EVENT_MARKING_WIDTH = 5
	def __init__(self, sched_tracer, title, parent = None, id = -1):
		"""Build the scrollable canvas sized from the tracer's time interval.
		`sched_tracer` supplies the data (interval, rectangle count) and
		receives a back-reference so it can redraw through this frame."""
		wx.Frame.__init__(self, parent, id, title)
		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
		self.screen_width -= 10
		self.screen_height -= 10
		self.zoom = 0.5
		self.scroll_scale = 20
		self.sched_tracer = sched_tracer
		self.sched_tracer.set_root_win(self)
		(self.ts_start, self.ts_end) = sched_tracer.interval()
		self.update_width_virtual()
		self.nr_rects = sched_tracer.nr_rectangles() + 1
		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		# whole window panel
		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
		# scrollable container
		self.scroll = wx.ScrolledWindow(self.panel)
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
		self.scroll.EnableScrolling(True, True)
		self.scroll.SetFocus()
		# scrollable drawing area
		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Fit()
		self.Fit()
		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
		self.txt = None
		self.Show(True)
	def us_to_px(self, val):
		"""Convert a time delta to pixels at the current zoom.
		NOTE(review): the 10**3 divisor suggests input is in nanoseconds
		despite the name — confirm against sched_tracer's timestamp unit."""
		return val / (10 ** 3) * self.zoom
	def px_to_us(self, val):
		"""Inverse of us_to_px: convert pixels back to a time delta."""
		return (val / self.zoom) * (10 ** 3)
	def scroll_start(self):
		"""Return the scroll origin in pixels (view start * scroll unit)."""
		(x, y) = self.scroll.GetViewStart()
		return (x * self.scroll_scale, y * self.scroll_scale)
	def scroll_start_us(self):
		"""Return the horizontal scroll origin as a time offset."""
		(x, y) = self.scroll_start()
		return self.px_to_us(x)
	def paint_rectangle_zone(self, nr, color, top_color, start, end):
		"""Draw one rectangle in lane `nr` spanning [start, end] timestamps,
		with an optional `top_color` marker strip along its upper edge."""
		offset_px = self.us_to_px(start - self.ts_start)
		# NOTE(review): this is the end *x-coordinate*, not the span width
		# (that would be us_to_px(end - start)); DrawRectangle below treats
		# it as a width. Looks suspicious — confirm whether intentional.
		width_px = self.us_to_px(end - self.ts_start)
		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		width_py = RootFrame.RECT_HEIGHT
		dc = self.dc
		if top_color is not None:
			# Paint the event-marker strip, then shrink the main rectangle
			# so it sits below the strip.
			(r, g, b) = top_color
			top_color = wx.Colour(r, g, b)
			brush = wx.Brush(top_color, wx.SOLID)
			dc.SetBrush(brush)
			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
			width_py -= RootFrame.EVENT_MARKING_WIDTH
			offset_py += RootFrame.EVENT_MARKING_WIDTH
		(r ,g, b) = color
		color = wx.Colour(r, g, b)
		brush = wx.Brush(color, wx.SOLID)
		dc.SetBrush(brush)
		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
	def update_rectangles(self, dc, start, end):
		"""Ask the tracer to redraw the [start, end] window (relative offsets
		are converted to absolute timestamps first)."""
		start += self.ts_start
		end += self.ts_start
		self.sched_tracer.fill_zone(start, end)
	def on_paint(self, event):
		"""Repaint the currently visible time window of the trace."""
		dc = wx.PaintDC(self.scroll_panel)
		self.dc = dc
		width = min(self.width_virtual, self.screen_width)
		(x, y) = self.scroll_start()
		start = self.px_to_us(x)
		end = self.px_to_us(x + width)
		self.update_rectangles(dc, start, end)
	def rect_from_ypixel(self, y):
		"""Map a y pixel to a lane index, or -1 if it falls between lanes
		or outside the lane area. (Python 2 integer division.)"""
		y -= RootFrame.Y_OFFSET
		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
			return -1
		return rect
	def update_summary(self, txt):
		"""Replace the summary text shown beneath the trace area."""
		if self.txt:
			self.txt.Destroy()
		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
	def on_mouse_down(self, event):
		"""Forward a click to the tracer as (lane, timestamp); ignore clicks
		that land between lanes."""
		(x, y) = event.GetPositionTuple()
		rect = self.rect_from_ypixel(y)
		if rect == -1:
			return
		t = self.px_to_us(x) + self.ts_start
		self.sched_tracer.mouse_down(rect, t)
	def update_width_virtual(self):
		"""Recompute the virtual canvas width from the trace interval and zoom."""
		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
	def __zoom(self, x):
		"""Rebuild scrollbars after a zoom change, keeping timestamp `x`
		at the left edge of the view."""
		self.update_width_virtual()
		(xpos, ypos) = self.scroll.GetViewStart()
		xpos = self.us_to_px(x) / self.scroll_scale
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
		self.Refresh()
	def zoom_in(self):
		x = self.scroll_start_us()
		self.zoom *= 2
		self.__zoom(x)
	def zoom_out(self):
		x = self.scroll_start_us()
		self.zoom /= 2
		self.__zoom(x)
	def on_key_press(self, event):
		"""Keyboard handling: +/- zoom, arrow keys scroll one unit."""
		key = event.GetRawKeyCode()
		if key == ord("+"):
			self.zoom_in()
			return
		if key == ord("-"):
			self.zoom_out()
			return
		key = event.GetKeyCode()
		(x, y) = self.scroll.GetViewStart()
		if key == wx.WXK_RIGHT:
			self.scroll.Scroll(x + 1, y)
		elif key == wx.WXK_LEFT:
			self.scroll.Scroll(x - 1, y)
		elif key == wx.WXK_DOWN:
			self.scroll.Scroll(x, y + 1)
		elif key == wx.WXK_UP:
			self.scroll.Scroll(x, y - 1)
 | 
	gpl-2.0 | -7,861,997,407,647,542,000 | 28.407609 | 158 | 0.679357 | false | 
| 
	vponomaryov/rally | 
	rally/plugins/openstack/context/existing_users.py | 
	1 | 
	2614 | 
	# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark import context
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils as rutils
from rally import objects
from rally import osclients
LOG = logging.getLogger(__name__)
# NOTE(boris-42): This context should be hidden for now and used only by
#                 benchmark engine.  In future during various refactoring of
#                 validation system and rally CI testing we will make it public
@context.context(name="existing_users", order=99, hidden=True)
class ExistingUsers(context.Context):
    """Rally context that wires pre-existing OpenStack users into a run.

    Rather than creating temporary users, this context reads user
    credentials from the deployment configuration and fills in
    context["users"] and context["tenants"] with the same layout the
    regular "users" context produces, so benchmarks behave identically
    with either context.
    """

    # NOTE(boris-42): We don't need to check config schema because
    #                 this is used only by benchmark engine
    CONFIG_SCHEMA = {}

    def __init__(self, ctx):
        super(ExistingUsers, self).__init__(ctx)
        self.context["users"] = []
        self.context["tenants"] = {}

    @rutils.log_task_wrapper(LOG.info, _("Enter context: `existing_users`"))
    def setup(self):
        users = self.context["users"]
        tenants = self.context["tenants"]
        for user_config in self.config:
            endpoint = objects.Endpoint(**user_config)
            keystone = osclients.Clients(endpoint).keystone()
            # Register the user's tenant the first time we encounter it.
            if keystone.tenant_id not in tenants:
                tenants[keystone.tenant_id] = {
                    "id": keystone.tenant_id,
                    "name": keystone.tenant_name
                }
            users.append({
                "endpoint": endpoint,
                "id": keystone.user_id,
                "tenant_id": keystone.tenant_id
            })

    @rutils.log_task_wrapper(LOG.info, _("Exit context: `existing_users`"))
    def cleanup(self):
        """These users are not managed by Rally, so don't touch them."""
 | 
	apache-2.0 | -8,003,209,098,813,009,000 | 36.342857 | 79 | 0.65264 | false | 
| 
	robwebset/screensaver.weather | 
	resources/lib/settings.py | 
	1 | 
	1308 | 
	# -*- coding: utf-8 -*-
import xbmc
import xbmcaddon
ADDON = xbmcaddon.Addon(id='screensaver.weather')
ADDON_ID = ADDON.getAddonInfo('id')
# Common logging module
def log(txt, loglevel=xbmc.LOGDEBUG):
    """Write `txt` to the Kodi log, prefixed with this add-on's ID.

    Debug-level messages are emitted only when the add-on's "logEnabled"
    setting is true; messages at any other level are always logged.
    """
    if (ADDON.getSetting("logEnabled") == "true") or (loglevel != xbmc.LOGDEBUG):
        if isinstance(txt, str):
            # Python 2: promote byte strings to unicode before formatting.
            txt = txt.decode("utf-8")
        message = u'%s: %s' % (ADDON_ID, txt)
        xbmc.log(msg=message.encode("utf-8"), level=loglevel)
##############################
# Stores Various Settings
##############################
class Settings():
    # Pre-computed ARGB overlay colours for each dim level: '00000000'
    # (no dimming) through 'EE000000'.  'FF000000' would be completely
    # black, so it is deliberately excluded (15 options in total).
    DIM_LEVEL = tuple('{0:X}{0:X}000000'.format(level) for level in range(15))

    @staticmethod
    def getDimValue():
        """Return the ARGB overlay colour for the configured dim level."""
        # An unset "dimLevel" setting means no dimming at all.
        dim_setting = ADDON.getSetting("dimLevel")
        if not dim_setting:
            return '00000000'
        return Settings.DIM_LEVEL[int(dim_setting)]
 | 
	gpl-2.0 | -4,987,328,565,707,168,000 | 25.693878 | 81 | 0.542049 | false | 
| 
	niekas/dakis | 
	dakis/website/migrations/openid/0001_initial.py | 
	5 | 
	1240 | 
	# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the OpenID integration: creates the nonce and
    association-store tables used during OpenID authentication."""

    # First migration for this app: no prior migrations to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='OpenIDNonce',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
                ('server_url', models.CharField(max_length=255)),
                ('timestamp', models.IntegerField()),
                ('salt', models.CharField(max_length=255)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='OpenIDStore',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
                ('server_url', models.CharField(max_length=255)),
                ('handle', models.CharField(max_length=255)),
                ('secret', models.TextField()),
                ('issued', models.IntegerField()),
                ('lifetime', models.IntegerField()),
                ('assoc_type', models.TextField()),
            ],
        ),
    ]
 | 
	agpl-3.0 | -1,867,539,125,413,394,700 | 34.428571 | 114 | 0.533871 | false | 
| 
	Jonbean/DSSM | 
	classification/utils.py | 
	8 | 
	5542 | 
	'''
Author: Jon Tsai    
Created: May 29 2016
'''
import numpy as np 
import theano
from time import sleep
import sys
def progress_bar(percent, speed):
    """Render an in-place textual progress bar on stdout.

    Args:
        percent: completion percentage in [0, 100].
        speed: processing speed, reported as instances per second.
    """
    # Floor division keeps the bar width an int on both Python 2 and 3;
    # with "/", Python 3 yields a float and '=' * i raises TypeError.
    i = int(percent) // 2
    sys.stdout.write('\r')
    # the exact output you're looking for:
    sys.stdout.write("[%-50s] %d%% %f instances/s" % ('='*i, percent, speed))
    sys.stdout.flush()
    
def combine_sents(sent_set):
    """Flatten each document's list of sentences into one long sequence.

    Args:
        sent_set (list[list[list]]): documents, each a list of index
            sequences, e.g.
            [[[sent1], [sent2], ...], ..., [[sent1], ...]]

    Returns:
        list of 1-D numpy arrays, one per document, produced by
        concatenating that document's sentences in order (float dtype,
        matching the original accumulate-from-empty behaviour).
    """
    return [
        np.concatenate([np.array([])] + [np.asarray(sent) for sent in doc])
        for doc in sent_set
    ]
def shuffle_index(length_of_indices_ls):
    """Return the indices 0..length-1 in random order.

    Args:
        length_of_indices_ls (int): number of indices to generate.

    Returns:
        numpy.ndarray: a shuffled permutation of
        arange(length_of_indices_ls).
    """
    indices = np.arange(length_of_indices_ls)
    np.random.shuffle(indices)
    return indices
def padding(batch_input_list):
    """Right-pad a batch of variable-length index sequences with zeros.

    Args:
        batch_input_list (list[list]): batch of index sequences.

    Returns:
        numpy.ndarray: int64 array of shape (n_batch, max_time_step)
        where shorter sequences are zero-padded on the right.
    """
    max_time_step = max(len(seq) for seq in batch_input_list)
    padded = np.zeros((len(batch_input_list), max_time_step))
    for row, seq in enumerate(batch_input_list):
        # Write the real tokens; the remainder of the row stays zero.
        padded[row, :len(seq)] = seq
    return padded.astype('int64')
def mask_generator(indices_matrix):
    """Build a binary mask marking real (non-padding) positions.

    Args:
        indices_matrix (list[list]): batch of variable-length sequences.

    Returns:
        numpy.ndarray: float array of shape (n_batch, max_len) with 1.0
        wherever the corresponding sequence has a token, 0.0 elsewhere.
    """
    lengths = [len(sent) for sent in indices_matrix]
    mask = np.zeros((len(indices_matrix), max(lengths)))
    for row, sent_len in enumerate(lengths):
        mask[row, :sent_len] = 1
    return mask
def mlp_mask_generator(indices_matrix, wemb_size):
    """Build a mask whose rows are filled with each sequence's length.

    Args:
        indices_matrix (list[list]): batch of variable-length sequences.
        wemb_size (int): width of the mask (word-embedding dimension).

    Returns:
        numpy.ndarray: float array of shape (n_batch, wemb_size) where
        every entry of row i equals len(indices_matrix[i]).
    """
    lengths = np.asarray([len(sent) for sent in indices_matrix], dtype=float)
    # Broadcast each length across the embedding dimension.
    return np.tile(lengths[:, None], (1, wemb_size))
def fake_input_generator(max_index, batch_number, length_range):
    '''
    Generate a random batch of padded index sequences plus its mask.

    ----------
    parameter: 
    ----------
    max_index: type = int 
    batch_number: type = int 
    length_range: tuple(int), len(length_range) = 2 
                  e.g. (50, 70)
    ----------
    return: 
    ----------
    (fake_data, mask):
        fake_data: type = np.ndarray (int32),
                   shape = (batch_number, max_time_step),
                   0 <= fake_data[i][j] <= max_index
        mask: type = np.ndarray (theano floatX), 1s over real tokens,
              0s over right padding
    '''    
    max_time_step = length_range[0] + np.random.randint(length_range[1] - length_range[0] + 1)
    
    fake_data = np.zeros((batch_number, max_time_step))
    
    mask = np.zeros((batch_number, max_time_step)).astype(theano.config.floatX)
    len_range = max_time_step - length_range[0]
    assert len_range >= 0
    #pick a row to be the max length row
    row = np.random.randint(batch_number)
    fake_data[row] = np.random.randint(max_index+1, size = (max_time_step,))
    mask[row] = np.ones(max_time_step)
    for batch in range(batch_number):
        if batch == row:
            continue
        # Bug fix: np.random.randint(0) raises ValueError, which happened
        # whenever the sampled max_time_step equaled length_range[0]
        # (e.g. length_range=(n, n)).  Use the minimum length in that case.
        length = length_range[0] + (np.random.randint(len_range) if len_range > 0 else 0)
        fake_data[batch] = np.concatenate((np.random.randint(max_index+1 ,size = (length,)), 
                                       np.zeros(max_time_step - length)))
        mask[batch] = np.concatenate((np.ones(length), np.zeros(max_time_step - length)))
    return (fake_data.astype('int32'), mask)
def fake_data(max_index, batch_number, max_time_step, min_time_step):
    '''
    Generate a random batch of padded index sequences plus its mask,
    with an explicitly fixed max_time_step.

    ----------
    parameter:
    ----------
    max_index: type = int
    batch_number: type = int
    max_time_step: type = int
    min_time_step: type = int, must satisfy min_time_step <= max_time_step
    ----------
    return:
    ----------
    (fake_data, mask):
        fake_data: type = np.ndarray (int32),
                   shape = (batch_number, max_time_step)
        mask: type = np.ndarray (theano floatX), 1s over real tokens,
              0s over right padding
    '''
    fake_data = np.zeros((batch_number, max_time_step))
    
    mask = np.zeros((batch_number, max_time_step)).astype(theano.config.floatX)
    len_range = max_time_step - min_time_step
    assert len_range >= 0
    #pick a row to be the max length row
    row = np.random.randint(batch_number)
    fake_data[row] = np.random.randint(max_index+1, size = (max_time_step,))
    mask[row] = np.ones(max_time_step)
    for batch in range(batch_number):
        if batch == row:
            continue
        # Bug fix: np.random.randint(0) raises ValueError, which happened
        # whenever max_time_step == min_time_step.  Use the minimum length
        # in that case instead of crashing.
        length = min_time_step + (np.random.randint(len_range) if len_range > 0 else 0)
        fake_data[batch] = np.concatenate((np.random.randint(max_index+1 ,size = (length,)), 
                                       np.zeros(max_time_step - length)))
        mask[batch] = np.concatenate((np.ones(length), np.zeros(max_time_step - length)))
    return (fake_data.astype('int32'), mask)
	gpl-3.0 | -28,399,149,772,886,012 | 26.305419 | 104 | 0.544388 | false | 
| 
	Treeki/NewerSMBW | 
	Koopatlas/src/editorui/objects.py | 
	1 | 
	4434 | 
	from common import *
from editorcommon import *
import weakref
class KPEditorObject(KPEditorItem):
	"""Editor canvas item wrapping one map object on a layer, handling
	selection drawing, movement and edge/corner resizing on a 24px grid."""

	SNAP_TO = (24,24)
	def __init__(self, obj, layer):
		"""Bind this item to `obj` (the model) and `layer`; both are held
		via weakrefs so the editor item does not keep them alive."""
		KPEditorItem.__init__(self)
		obj.qtItem = self
		self._objRef = weakref.ref(obj)
		self._layerRef = weakref.ref(layer)
		self._updatePosition()
		self._updateSize()
		self.setAcceptHoverEvents(True)
		# Current resize drag state: None, or (resizeX, xSide, resizeY, ySide).
		self.resizing = None
		if not hasattr(KPEditorObject, 'SELECTION_PEN'):
			KPEditorObject.SELECTION_PEN = QtGui.QPen(Qt.green, 1, Qt.DotLine)
		# I don't bother setting the ZValue because it doesn't quite matter:
		# only one layer's objects are ever clickable, and drawBackground takes
		# care of the layered drawing
	
	def _updatePosition(self):
		"""Sync the scene position from the model (grid coords * 24px)."""
		self.ignoreMovement = True
		x,y = self._objRef().position
		self.setPos(x*24, y*24)
		self.ignoreMovement = False
	
	def _updateSize(self):
		"""Sync bounding/selection rects from the model size (grid * 24px)."""
		self.prepareGeometryChange()
		obj = self._objRef()
		w,h = obj.size
		self._boundingRect = QtCore.QRectF(0, 0, w*24, h*24)
		self._selectionRect = QtCore.QRectF(0, 0, w*24-1, h*24-1)
		self._resizerEndXY = (w*24-5, h*24-5)
	
	def paint(self, painter, option, widget):
		# Only the selection outline is painted here; the object's pixels
		# are drawn by the scene background pass (see __init__ note).
		if self.isSelected():
			painter.setPen(self.SELECTION_PEN)
			painter.drawRect(self._selectionRect)
	
	def hoverMoveEvent(self, event):
		"""Pick a resize cursor matching the handle under the mouse.
		The `bit` codes come from KPEditorItem.resizerPortionAt (not visible
		here); inferred mapping: 1=TL, 2=TR, 3=BL, 4=BR corners, 5=top,
		6=bottom, 7=left, 8=right edges — TODO confirm."""
		if self._layerRef() != KP.mapScene.currentLayer:
			self.setCursor(Qt.ArrowCursor)
			return
		pos = event.pos()
		bit = self.resizerPortionAt(pos.x(), pos.y())
		if bit == 1 or bit == 4:
			self.setCursor(Qt.SizeFDiagCursor)
		elif bit == 2 or bit == 3:
			self.setCursor(Qt.SizeBDiagCursor)
		elif bit == 7 or bit == 8:
			self.setCursor(Qt.SizeHorCursor)
		elif bit == 5 or bit == 6:
			self.setCursor(Qt.SizeVerCursor)
		else:
			self.setCursor(Qt.ArrowCursor)
	
	def mousePressEvent(self, event):
		"""Begin a resize drag if the press lands on a resize handle of the
		active layer; otherwise fall through to normal item dragging."""
		if event.button() == Qt.LeftButton:
			pos = event.pos()
			bit = self.resizerPortionAt(pos.x(), pos.y())
			if self._layerRef() == KP.mapScene.currentLayer and bit:
			# if bit:
				event.accept()
				# Decode which axes move and which side stays fixed:
				# side 1 = left/top edge moves, side 0 = right/bottom edge moves.
				x, xSide, y, ySide = False, None, False, None
				
				if bit == 1 or bit == 7 or bit == 3:
					x, xSide = True, 1
				elif bit == 2 or bit == 4 or bit == 8:
					x, xSide = True, 0
				if bit == 1 or bit == 2 or bit == 5:
					y, ySide = True, 1
				elif bit == 3 or bit == 4 or bit == 6:
					y, ySide = True, 0
				self.resizing = (x, xSide, y, ySide)
				return
		KPEditorItem.mousePressEvent(self, event)
	
	def _tryAndResize(self, obj, axisIndex, mousePosition, stationarySide):
		"""Resize `obj` along one axis (0=x, 1=y) toward `mousePosition`
		(in grid units), keeping the `stationarySide` edge fixed.
		Returns True if position/size actually changed, enforcing a
		minimum size of 1 grid cell."""
		objPosition = obj.position[axisIndex]
		objSize = obj.size[axisIndex]
		if stationarySide == 0:
			# Resize the right/bottom side
			relativeMousePosition = mousePosition - objPosition
			newSize = relativeMousePosition + 1
			if newSize == objSize or newSize < 1:
				return False
			if axisIndex == 1:
				obj.size = (obj.size[0], newSize)
			else:
				obj.size = (newSize, obj.size[1])
		else:
			# Resize the left/top side
			rightSide = objPosition + objSize - 1
			newLeftSide = mousePosition
			newPosition = newLeftSide
			newSize = rightSide - newLeftSide + 1
			if newSize < 1:
				return False
			if newPosition == objPosition and newSize == objSize:
				return False
			if axisIndex == 1:
				obj.position = (obj.position[0], newPosition)
				obj.size = (obj.size[0], newSize)
			else:
				obj.position = (newPosition, obj.position[1])
				obj.size = (newSize, obj.size[1])
		return True
	def mouseMoveEvent(self, event):
		"""While a resize drag is active, resize per-axis and refresh caches;
		otherwise delegate to normal move handling."""
		if self.resizing:
			obj = self._objRef()
			scenePos = event.scenePos()
			hasChanged = False
			resizeX, xSide, resizeY, ySide = self.resizing
			if resizeX:
				hasChanged |= self._tryAndResize(obj, 0, int(scenePos.x() / 24), xSide)
			if resizeY:
				hasChanged |= self._tryAndResize(obj, 1, int(scenePos.y() / 24), ySide)
			if hasChanged:
				obj.updateCache()
				self._layerRef().updateCache()
				self._updatePosition()
				self._updateSize()
		else:
			KPEditorItem.mouseMoveEvent(self, event)
	
	def mouseReleaseEvent(self, event):
		# End a resize drag on left-button release.
		if self.resizing and event.button() == Qt.LeftButton:
			self.resizing = None
		else:
			KPEditorItem.mouseReleaseEvent(self, event)
	
	def _itemMoved(self, oldX, oldY, newX, newY):
		# Pixel coords back to grid coords (Python 2 integer division).
		obj = self._objRef()
		obj.position = (newX/24, newY/24)
		self._layerRef().updateCache()
	def remove(self, withItem=False):
		"""Remove the wrapped object from its layer; optionally also remove
		this editor item from the scene."""
		obj = self._objRef()
		layer = self._layerRef()
		layer.objects.remove(obj)
		layer.updateCache()
		if withItem:
			self.scene().removeItem(self)
 | 
	mit | -8,735,065,201,513,281,000 | 22.967568 | 75 | 0.662382 | false | 
| 
	eestay/edx-ora2 | 
	scripts/render_templates.py | 
	7 | 
	3912 | 
	#!/usr/bin/env python
"""
Render Django templates.
Useful for generating fixtures for the JavaScript unit test suite.
Usage:
    python render_templates.py path/to/templates.json
where "templates.json" is a JSON file of the form:
    [
        {
            "template": "openassessmentblock/oa_base.html",
            "context": {
                "title": "Lorem",
                "question": "Ipsum?"
            },
            "output": "oa_base.html"
        },
        ...
    ]
The rendered templates are saved to "output" relative to the
templates.json file's directory.
"""
import sys
import os.path
import json
import re
import dateutil.parser
import pytz
# This is a bit of a hack to ensure that the root repo directory
# is in the Python path, so Django can find the settings module.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from django.template.context import Context
from django.template.loader import get_template
# Usage string shown when the script is invoked without arguments.
USAGE = u"{prog} TEMPLATE_DESC"
# Matches datetime strings of the form "2014-01-02T12:34" (see parse_dates).
DATETIME_REGEX = re.compile("^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}$")
def parse_dates(context):
    """
    Transform datetime strings into Python datetime objects.
    JSON does not provide a standard way to serialize datetime objects,
    but some of the templates expect that the context contains
    Python datetime objects.
    This (somewhat hacky) solution recursively searches the context
    for formatted datetime strings of the form "2014-01-02T12:34"
    and converts them to Python datetime objects with the timezone
    set to UTC.
    Args:
        context (JSON-serializable): The context (or part of the context)
            that will be passed to the template.  Dictionaries and lists
            will be recursively searched and transformed.
    Returns:
        JSON-serializable of the same type as the `context` argument.
    """
    # NOTE: Python 2 only (iteritems/basestring below).
    if isinstance(context, dict):
        return {
            key: parse_dates(value)
            for key, value in context.iteritems()
        }
    elif isinstance(context, list):
        return [
            parse_dates(item)
            for item in context
        ]
    elif isinstance(context, basestring):
        if DATETIME_REGEX.match(context) is not None:
            return dateutil.parser.parse(context).replace(tzinfo=pytz.utc)
    # Non-matching strings and all other leaf values pass through unchanged.
    return context
def render_templates(root_dir, template_json):
    """
    Create rendered templates.
    Args:
        root_dir (str): The directory in which to write the rendered templates.
        template_json (dict): Description of which templates to render.  Must be a list
            of dicts, each containing keys "template" (str), "context" (dict), and "output" (str).
    Returns:
        None
    Side effects:
        Writes one UTF-8 encoded file per entry; exits the process with
        status 1 on the first file that cannot be written.
    """
    for template_dict in template_json:
        # Resolved via Django's template loaders (settings must be configured).
        template = get_template(template_dict['template'])
        # Convert "YYYY-MM-DDTHH:MM" strings to datetime objects first.
        context = parse_dates(template_dict['context'])
        rendered = template.render(Context(context))
        output_path = os.path.join(root_dir, template_dict['output'])
        try:
            with open(output_path, 'w') as output_file:
                # Encode explicitly: rendered is unicode (Python 2).
                output_file.write(rendered.encode('utf-8'))
        except IOError:
            print "Could not write rendered template to file: {}".format(output_path)
            sys.exit(1)
def main():
    """
    Main entry point for the script.
    Reads the template-description JSON file named by the first command-line
    argument and renders each described template next to that file.
    Exits with status 1 on missing argument, unreadable file, or invalid JSON.
    """
    if len(sys.argv) < 2:
        print USAGE.format(sys.argv[0])
        sys.exit(1)
    try:
        with open(sys.argv[1]) as template_json:
            # Outputs are written relative to the description file's directory.
            root_dir = os.path.dirname(sys.argv[1])
            render_templates(root_dir, json.load(template_json))
    except IOError as ex:
        print u"Could not open template description file: {}".format(sys.argv[1])
        print(ex)
        sys.exit(1)
    except ValueError as ex:
        # json.load raises ValueError on malformed JSON (Python 2).
        print u"Could not parse template description as JSON: {}".format(sys.argv[1])
        print(ex)
        sys.exit(1)
if __name__ == '__main__':
    main()
 | 
	agpl-3.0 | 2,144,937,601,752,186,000 | 28.413534 | 98 | 0.629601 | false | 
| 
	scalient/ebsmount | 
	cmd_manual.py | 
	2 | 
	2801 | 
	#!/usr/bin/python
# Copyright (c) 2010 Alon Swartz <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of 
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""EBS Mount - manually mount EBS device (simulates udev add trigger)
Arguments:
    device          EBS device to mount (e.g., /dev/xvdf, /dev/vda)
Options:
    --format=FS     Format device prior to mount (e.g., --format=ext3)
"""
import re
import os
import sys
import getopt
import ebsmount
import executil
from utils import config, is_mounted
def usage(e=None):
    """Print an optional error plus the usage text to stderr and exit(1)."""
    if e:
        print >> sys.stderr, "error: " + str(e)
    print >> sys.stderr, "Syntax: %s [-opts] <device>" % sys.argv[0]
    # The module docstring doubles as the detailed usage text.
    print >> sys.stderr, __doc__.strip()
    sys.exit(1)
def fatal(s):
    """Print an error message to stderr and terminate with exit status 1."""
    print >> sys.stderr, "error: " + str(s)
    sys.exit(1)
def _expected_devpath(devname, devpaths):
    """Heuristic check of a device's udev devpath structure.

    Runs `udevadm info -a -n <devname>`, extracts every parent-device path
    from its output, and reports whether any of them matches one of the
    regex patterns in `devpaths`.  (Self-described "ugly hack".)

    Args:
        devname:  device node to inspect, e.g. /dev/xvdf
        devpaths: iterable of regex patterns to search for
    Returns:
        True as soon as one parent devpath matches any pattern, else False.
    """
    udev_output = executil.getoutput('udevadm info -a -n %s' % devname)
    parent_re = re.compile("^looking at parent device '(.*)':")
    for raw_line in udev_output.splitlines():
        match = parent_re.match(raw_line.strip())
        if not match:
            continue
        parent_devpath = match.group(1)
        if any(re.search(pattern, parent_devpath) for pattern in devpaths):
            return True
    return False
def main():
    """Parse options, sanity-check the device, optionally mkfs, then mount."""
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], 'h', ['format='])
    except getopt.GetoptError, e:
        usage(e)
    filesystem = None
    for opt, val in opts:
        if opt == '-h':
            usage()
        if opt == '--format':
            filesystem = val
    # Exactly one positional argument (the device) is required.
    if not len(args) == 1:
        usage()
    devname = args[0]
    if not os.path.exists(devname):
        fatal("%s does not exist" % devname)
    # Refuse devices whose udev devpath doesn't match the configured patterns.
    if not _expected_devpath(devname, config.devpaths.split()):
        fatal("devpath not of expected structure, or failed lookup")
    if filesystem:
        # Never format a mounted device, and only allow whitelisted filesystems.
        if is_mounted(devname):
            fatal("%s is mounted" % devname)
        if not filesystem in config.filesystems.split():
            fatal("%s is not supported in %s" % (filesystem, config.CONF_FILE))
        executil.system("mkfs." + filesystem, "-q", devname)
    # Simulates the udev "add" trigger handled by ebsmount.
    ebsmount.ebsmount_add(devname, config.mountdir)
if __name__=="__main__":
    main()
 | 
	gpl-2.0 | 4,128,131,317,433,193,500 | 26.732673 | 79 | 0.625848 | false | 
| 
	vicky2135/lucious | 
	lucious/lib/python2.7/site-packages/pip/_vendor/progress/__init__.py | 
	916 | 
	3023 | 
	# Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from collections import deque
from datetime import timedelta
from math import ceil
from sys import stderr
from time import time
__version__ = '1.2'
class Infinite(object):
    """Open-ended progress indicator with no known endpoint.

    Tracks how many steps have completed (``index``), when tracking began,
    and a simple moving average of per-step durations over the most recent
    ``sma_window`` steps.  Subclasses override ``update``/``start``/``finish``
    to actually draw something; the base implementations are no-ops.
    """
    file = stderr
    sma_window = 10

    def __init__(self, *args, **kwargs):
        self.index = 0
        self.start_ts = time()
        self._ts = self.start_ts
        # Ring buffer of recent per-step durations for the moving average.
        # Sized from the class attribute *before* kwargs are applied.
        self._dt = deque(maxlen=self.sma_window)
        # Any keyword arguments become instance attributes.
        for attr, value in kwargs.items():
            setattr(self, attr, value)

    def __getitem__(self, key):
        # Template-style lookup; underscore-prefixed names stay hidden.
        return None if key.startswith('_') else getattr(self, key, None)

    @property
    def avg(self):
        """Moving average of seconds per step (0 before any step)."""
        if not self._dt:
            return 0
        return sum(self._dt) / len(self._dt)

    @property
    def elapsed(self):
        """Whole seconds since this indicator was created."""
        return int(time() - self.start_ts)

    @property
    def elapsed_td(self):
        return timedelta(seconds=self.elapsed)

    def update(self):
        pass

    def start(self):
        pass

    def finish(self):
        pass

    def next(self, n=1):
        """Advance by ``n`` steps, recording timing, and redraw."""
        if n > 0:
            now = time()
            self._dt.append((now - self._ts) / n)
            self._ts = now
        self.index = self.index + n
        self.update()

    def iter(self, it):
        """Yield items from *it*, advancing once per item; finish at the end."""
        for item in it:
            yield item
            self.next()
        self.finish()
class Progress(Infinite):
    """Progress indicator with a known endpoint (``max``, default 100)."""

    def __init__(self, *args, **kwargs):
        super(Progress, self).__init__(*args, **kwargs)
        self.max = kwargs.get('max', 100)

    @property
    def eta(self):
        """Estimated whole seconds remaining, from the moving average."""
        return int(ceil(self.avg * self.remaining))

    @property
    def eta_td(self):
        return timedelta(seconds=self.eta)

    @property
    def percent(self):
        return self.progress * 100

    @property
    def progress(self):
        # Clamp at 1 so overshooting max never reports more than 100%.
        return min(1, self.index / self.max)

    @property
    def remaining(self):
        return max(self.max - self.index, 0)

    def start(self):
        self.update()

    def goto(self, index):
        # Jump to an absolute position by advancing (or rewinding) via next().
        self.next(index - self.index)

    def iter(self, it):
        """Yield items from *it*; adopt len(it) as max when *it* is sized."""
        try:
            self.max = len(it)
        except TypeError:
            pass
        for item in it:
            yield item
            self.next()
        self.finish()
 | 
	bsd-3-clause | 8,970,014,737,514,017,000 | 23.577236 | 74 | 0.600397 | false | 
| 
	johndpope/tensorflow | 
	tensorflow/tensorboard/backend/application.py | 
	24 | 
	26886 | 
	# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard WSGI Application Logic.
TensorBoardApplication constructs TensorBoard as a WSGI application.
It handles serving static assets, and implements TensorBoard data APIs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import imghdr
import mimetypes
import os
import re
import threading
import time
import six
from six import StringIO
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
from six.moves.urllib import parse as urlparse
from werkzeug import wrappers
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tensorboard.backend import http_util
from tensorflow.tensorboard.backend import process_graph
from tensorflow.tensorboard.backend.event_processing import event_accumulator
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
# Per-category caps on how many events each run's EventAccumulator retains
# (older events are reservoir-sampled away once the cap is reached).
DEFAULT_SIZE_GUIDANCE = {
    event_accumulator.COMPRESSED_HISTOGRAMS: 500,
    event_accumulator.IMAGES: 10,
    event_accumulator.AUDIO: 10,
    event_accumulator.SCALARS: 1000,
    event_accumulator.HEALTH_PILLS: 100,
    event_accumulator.HISTOGRAMS: 50,
}
# URL path components for the data-serving routes that TensorBoardWSGIApp
# registers in its data_applications table.
DATA_PREFIX = '/data'
LOGDIR_ROUTE = '/logdir'
RUNS_ROUTE = '/runs'
PLUGIN_PREFIX = '/plugin'
PLUGINS_LISTING_ROUTE = '/plugins_listing'
# Routes derived from the event_accumulator tag-type names.
SCALARS_ROUTE = '/' + event_accumulator.SCALARS
IMAGES_ROUTE = '/' + event_accumulator.IMAGES
AUDIO_ROUTE = '/' + event_accumulator.AUDIO
HISTOGRAMS_ROUTE = '/' + event_accumulator.HISTOGRAMS
COMPRESSED_HISTOGRAMS_ROUTE = '/' + event_accumulator.COMPRESSED_HISTOGRAMS
INDIVIDUAL_IMAGE_ROUTE = '/individualImage'
INDIVIDUAL_AUDIO_ROUTE = '/individualAudio'
GRAPH_ROUTE = '/' + event_accumulator.GRAPH
RUN_METADATA_ROUTE = '/' + event_accumulator.RUN_METADATA
# Frontend tab paths that should all be answered with the index page.
TAB_ROUTES = ['', '/events', '/images', '/audio', '/graphs', '/histograms']
_IMGHDR_TO_MIMETYPE = {
    'bmp': 'image/bmp',
    'gif': 'image/gif',
    'jpeg': 'image/jpeg',
    'png': 'image/png'
}
_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'
def _content_type_for_image(encoded_image_string):
  image_type = imghdr.what(None, encoded_image_string)
  return _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
class _OutputFormat(object):
  """An enum used to list the valid output formats for API calls.
  Not all API calls support all formats (for example, only scalars and
  compressed histograms support CSV).
  """
  # Values match the 'format' query-string parameter sent by the frontend.
  JSON = 'json'
  CSV = 'csv'
def standard_tensorboard_wsgi(logdir, purge_orphaned_data, reload_interval,
                              plugins):
  """Construct a TensorBoardWSGIApp backed by a standard EventMultiplexer.

  Args:
    logdir: The path to the directory containing events files.
    purge_orphaned_data: Whether to purge orphaned data.
    reload_interval: The interval at which the backend reloads more data in
        seconds.
    plugins: A list of plugins for TensorBoard to initialize.
  Returns:
    The new TensorBoard WSGI application.
  """
  event_mux = event_multiplexer.EventMultiplexer(
      size_guidance=DEFAULT_SIZE_GUIDANCE,
      purge_orphaned_data=purge_orphaned_data)
  return TensorBoardWSGIApp(logdir, plugins, event_mux, reload_interval)
class TensorBoardWSGIApp(object):
  """The TensorBoard application, conforming to WSGI spec."""

  # How many samples to include in sampling API calls by default.
  DEFAULT_SAMPLE_COUNT = 10

  # NOTE TO MAINTAINERS: An accurate Content-Length MUST be specified on all
  #                      responses using send_header.
  protocol_version = 'HTTP/1.1'

  def __init__(self, logdir, plugins, multiplexer, reload_interval):
    """Constructs the TensorBoard application.

    Args:
      logdir: the logdir spec that describes where data will be loaded.
        may be a directory, or comma,separated list of directories, or colons
        can be used to provide named directories
      plugins: List of plugins that extend tensorboard.plugins.BasePlugin
      multiplexer: The EventMultiplexer with TensorBoard data to serve
      reload_interval: How often (in seconds) to reload the Multiplexer

    Returns:
      A WSGI application that implements the TensorBoard backend.

    Raises:
      ValueError: If some plugin has no plugin_name
      ValueError: If two plugins have the same plugin_name
    """
    self._logdir = logdir
    self._plugins = plugins
    self._multiplexer = multiplexer
    self.tag = get_tensorboard_tag()

    path_to_run = parse_event_files_spec(self._logdir)
    if reload_interval:
      # Reloads happen on a background thread, repeating forever.
      start_reloading_multiplexer(self._multiplexer, path_to_run,
                                  reload_interval)
    else:
      # A reload_interval of 0/None means a single synchronous load.
      reload_multiplexer(self._multiplexer, path_to_run)

    self.data_applications = {
        '/app.js':
            self._serve_js,
        DATA_PREFIX + AUDIO_ROUTE:
            self._serve_audio,
        DATA_PREFIX + COMPRESSED_HISTOGRAMS_ROUTE:
            self._serve_compressed_histograms,
        DATA_PREFIX + GRAPH_ROUTE:
            self._serve_graph,
        DATA_PREFIX + HISTOGRAMS_ROUTE:
            self._serve_histograms,
        DATA_PREFIX + IMAGES_ROUTE:
            self._serve_images,
        DATA_PREFIX + INDIVIDUAL_AUDIO_ROUTE:
            self._serve_individual_audio,
        DATA_PREFIX + INDIVIDUAL_IMAGE_ROUTE:
            self._serve_image,
        DATA_PREFIX + LOGDIR_ROUTE:
            self._serve_logdir,
        # TODO(chizeng): Delete this RPC once we have skylark rules that obviate
        # the need for the frontend to determine which plugins are active.
        DATA_PREFIX + PLUGINS_LISTING_ROUTE:
            self._serve_plugins_listing,
        DATA_PREFIX + RUN_METADATA_ROUTE:
            self._serve_run_metadata,
        DATA_PREFIX + RUNS_ROUTE:
            self._serve_runs,
        DATA_PREFIX + SCALARS_ROUTE:
            self._serve_scalars,
    }

    # Serve the routes from the registered plugins using their name as the route
    # prefix. For example if plugin z has two routes /a and /b, they will be
    # served as /data/plugin/z/a and /data/plugin/z/b.
    plugin_names_encountered = set()
    for plugin in self._plugins:
      if plugin.plugin_name is None:
        raise ValueError('Plugin %s has no plugin_name' % plugin)
      if plugin.plugin_name in plugin_names_encountered:
        raise ValueError('Duplicate plugins for name %s' % plugin.plugin_name)
      plugin_names_encountered.add(plugin.plugin_name)

      try:
        plugin_apps = plugin.get_plugin_apps(self._multiplexer, self._logdir)
      except Exception as e:  # pylint: disable=broad-except
        # A broken plugin must not take down the whole TensorBoard backend.
        logging.warning('Plugin %s failed. Exception: %s', plugin.plugin_name,
                        str(e))
        continue
      for route, app in plugin_apps.items():
        path = DATA_PREFIX + PLUGIN_PREFIX + '/' + plugin.plugin_name + route
        self.data_applications[path] = app

  # We use underscore_names for consistency with inherited methods.

  def _image_response_for_run(self, run_images, run, tag):
    """Builds a JSON-serializable object with information about run_images.

    Args:
      run_images: A list of event_accumulator.ImageValueEvent objects.
      run: The name of the run.
      tag: The name of the tag the images all belong to.

    Returns:
      A list of dictionaries containing the wall time, step, URL, width, and
      height for each image.
    """
    response = []
    for index, run_image in enumerate(run_images):
      response.append({
          'wall_time': run_image.wall_time,
          'step': run_image.step,
          # We include the size so that the frontend can add that to the <img>
          # tag so that the page layout doesn't change when the image loads.
          'width': run_image.width,
          'height': run_image.height,
          'query': self._query_for_individual_image(run, tag, index)
      })
    return response

  def _audio_response_for_run(self, run_audio, run, tag):
    """Builds a JSON-serializable object with information about run_audio.

    Args:
      run_audio: A list of event_accumulator.AudioValueEvent objects.
      run: The name of the run.
      tag: The name of the tag the images all belong to.

    Returns:
      A list of dictionaries containing the wall time, step, URL, and
      content_type for each audio clip.
    """
    response = []
    for index, run_audio_clip in enumerate(run_audio):
      response.append({
          'wall_time': run_audio_clip.wall_time,
          'step': run_audio_clip.step,
          'content_type': run_audio_clip.content_type,
          'query': self._query_for_individual_audio(run, tag, index)
      })
    return response

  def _path_is_safe(self, path):
    """Check path is safe (stays within current directory).

    This is for preventing directory-traversal attacks.

    Args:
      path: The path to check for safety.

    Returns:
      True if the given path stays within the current directory, and false
      if it would escape to a higher directory. E.g. _path_is_safe('index.html')
      returns true, but _path_is_safe('../../../etc/password') returns false.
    """
    base = os.path.abspath(os.curdir)
    absolute_path = os.path.abspath(path)
    # Compare whole path components rather than a raw character prefix:
    # os.path.commonprefix works character-by-character, so a sibling
    # directory like '<base>-other' would incorrectly pass the check.
    return absolute_path == base or absolute_path.startswith(base + os.sep)

  @wrappers.Request.application
  def _serve_logdir(self, request):
    """Respond with a JSON object containing this TensorBoard's logdir."""
    return http_util.Respond(
        request, {'logdir': self._logdir}, 'application/json')

  @wrappers.Request.application
  def _serve_scalars(self, request):
    """Given a tag and single run, return array of ScalarEvents."""
    # TODO(cassandrax): return HTTP status code for malformed requests
    tag = request.args.get('tag')
    run = request.args.get('run')
    values = self._multiplexer.Scalars(run, tag)

    if request.args.get('format') == _OutputFormat.CSV:
      string_io = StringIO()
      writer = csv.writer(string_io)
      writer.writerow(['Wall time', 'Step', 'Value'])
      writer.writerows(values)
      return http_util.Respond(request, string_io.getvalue(), 'text/csv')
    else:
      return http_util.Respond(request, values, 'application/json')

  @wrappers.Request.application
  def _serve_graph(self, request):
    """Given a single run, return the graph definition in json format."""
    run = request.args.get('run', None)
    if run is None:
      return http_util.Respond(
          request, 'query parameter "run" is required', 'text/plain', 400)

    try:
      graph = self._multiplexer.Graph(run)
    except ValueError:
      return http_util.Respond(
          request, '404 Not Found', 'text/plain; charset=UTF-8', code=404)

    limit_attr_size = request.args.get('limit_attr_size', None)
    if limit_attr_size is not None:
      try:
        limit_attr_size = int(limit_attr_size)
      except ValueError:
        return http_util.Respond(
            request, 'query parameter `limit_attr_size` must be integer',
            'text/plain', 400)

    large_attrs_key = request.args.get('large_attrs_key', None)
    try:
      process_graph.prepare_graph_for_ui(graph, limit_attr_size,
                                         large_attrs_key)
    except ValueError as e:
      # str(e), not e.message: the .message attribute is Python 2 only and
      # raises AttributeError under Python 3.
      return http_util.Respond(request, str(e), 'text/plain', 400)
    return http_util.Respond(request, str(graph), 'text/x-protobuf')  # pbtxt

  @wrappers.Request.application
  def _serve_run_metadata(self, request):
    """Given a tag and a TensorFlow run, return the session.run() metadata."""
    tag = request.args.get('tag', None)
    run = request.args.get('run', None)
    if tag is None:
      return http_util.Respond(
          request, 'query parameter "tag" is required', 'text/plain', 400)
    if run is None:
      return http_util.Respond(
          request, 'query parameter "run" is required', 'text/plain', 400)
    try:
      run_metadata = self._multiplexer.RunMetadata(run, tag)
    except ValueError:
      return http_util.Respond(
          request, '404 Not Found', 'text/plain; charset=UTF-8', code=404)
    return http_util.Respond(
        request, str(run_metadata), 'text/x-protobuf')  # pbtxt

  @wrappers.Request.application
  def _serve_histograms(self, request):
    """Given a tag and single run, return an array of histogram values."""
    tag = request.args.get('tag')
    run = request.args.get('run')
    values = self._multiplexer.Histograms(run, tag)
    return http_util.Respond(request, values, 'application/json')

  @wrappers.Request.application
  def _serve_compressed_histograms(self, request):
    """Given a tag and single run, return an array of compressed histograms."""
    tag = request.args.get('tag')
    run = request.args.get('run')
    compressed_histograms = self._multiplexer.CompressedHistograms(run, tag)
    if request.args.get('format') == _OutputFormat.CSV:
      string_io = StringIO()
      writer = csv.writer(string_io)

      # Build the headers; we have two columns for timing and two columns for
      # each compressed histogram bucket.
      headers = ['Wall time', 'Step']
      if compressed_histograms:
        bucket_count = len(compressed_histograms[0].compressed_histogram_values)
        for i in xrange(bucket_count):
          headers += ['Edge %d basis points' % i, 'Edge %d value' % i]
      writer.writerow(headers)

      for compressed_histogram in compressed_histograms:
        row = [compressed_histogram.wall_time, compressed_histogram.step]
        for value in compressed_histogram.compressed_histogram_values:
          row += [value.rank_in_bps, value.value]
        writer.writerow(row)
      return http_util.Respond(request, string_io.getvalue(), 'text/csv')
    else:
      return http_util.Respond(
          request, compressed_histograms, 'application/json')

  @wrappers.Request.application
  def _serve_images(self, request):
    """Given a tag and list of runs, serve a list of images.

    Note that the images themselves are not sent; instead, we respond with URLs
    to the images. The frontend should treat these URLs as opaque and should not
    try to parse information about them or generate them itself, as the format
    may change.

    Args:
      request: A werkzeug.wrappers.Request object.

    Returns:
      A werkzeug.Response application.
    """
    tag = request.args.get('tag')
    run = request.args.get('run')

    images = self._multiplexer.Images(run, tag)
    response = self._image_response_for_run(images, run, tag)
    return http_util.Respond(request, response, 'application/json')

  @wrappers.Request.application
  def _serve_image(self, request):
    """Serves an individual image."""
    tag = request.args.get('tag')
    run = request.args.get('run')
    index = int(request.args.get('index'))
    image = self._multiplexer.Images(run, tag)[index]
    encoded_image_string = image.encoded_image_string
    content_type = _content_type_for_image(encoded_image_string)
    return http_util.Respond(request, encoded_image_string, content_type)

  def _query_for_individual_image(self, run, tag, index):
    """Builds a URL for accessing the specified image.

    This should be kept in sync with _serve_image. Note that the URL is *not*
    guaranteed to always return the same image, since images may be unloaded
    from the reservoir as new images come in.

    Args:
      run: The name of the run.
      tag: The tag.
      index: The index of the image. Negative values are OK.

    Returns:
      A string representation of a URL that will load the index-th
      sampled image in the given run with the given tag.
    """
    query_string = urllib.parse.urlencode({
        'run': run,
        'tag': tag,
        'index': index
    })
    return query_string

  @wrappers.Request.application
  def _serve_audio(self, request):
    """Given a tag and list of runs, serve a list of audio.

    Note that the audio clips themselves are not sent; instead, we respond with
    URLs to the audio. The frontend should treat these URLs as opaque and should
    not try to parse information about them or generate them itself, as the
    format may change.

    Args:
      request: A werkzeug.wrappers.Request object.

    Returns:
      A werkzeug.Response application.
    """
    tag = request.args.get('tag')
    run = request.args.get('run')

    audio_list = self._multiplexer.Audio(run, tag)
    response = self._audio_response_for_run(audio_list, run, tag)
    return http_util.Respond(request, response, 'application/json')

  @wrappers.Request.application
  def _serve_individual_audio(self, request):
    """Serves an individual audio clip."""
    tag = request.args.get('tag')
    run = request.args.get('run')
    index = int(request.args.get('index'))
    audio = self._multiplexer.Audio(run, tag)[index]
    return http_util.Respond(
        request, audio.encoded_audio_string, audio.content_type)

  def _query_for_individual_audio(self, run, tag, index):
    """Builds a URL for accessing the specified audio.

    This should be kept in sync with _serve_individual_audio. Note that the URL
    is *not* guaranteed to always return the same audio, since audio may be
    unloaded from the reservoir as new audio comes in.

    Args:
      run: The name of the run.
      tag: The tag.
      index: The index of the audio. Negative values are OK.

    Returns:
      A string representation of a URL that will load the index-th
      sampled audio in the given run with the given tag.
    """
    query_string = urllib.parse.urlencode({
        'run': run,
        'tag': tag,
        'index': index
    })
    return query_string

  @wrappers.Request.application
  def _serve_plugins_listing(self, request):
    """Serves an object mapping plugin name to whether it is enabled.

    Args:
      request: The werkzeug.Request object.

    Returns:
      A werkzeug.Response object.
    """
    return http_util.Respond(
        request,
        {plugin.plugin_name: plugin.is_active() for plugin in self._plugins},
        'application/json')

  @wrappers.Request.application
  def _serve_runs(self, request):
    """WSGI app serving a JSON object about runs and tags.

    Returns a mapping from runs to tagType to list of tags for that run.

    Args:
      request: A werkzeug request

    Returns:
      A werkzeug Response with the following content:
      {runName: {images: [tag1, tag2, tag3],
                 audio: [tag4, tag5, tag6],
                 scalars: [tagA, tagB, tagC],
                 histograms: [tagX, tagY, tagZ],
                 firstEventTimestamp: 123456.789}}
    """
    runs = self._multiplexer.Runs()
    for run_name, run_data in runs.items():
      try:
        run_data['firstEventTimestamp'] = self._multiplexer.FirstEventTimestamp(
            run_name)
      except ValueError:
        logging.warning('Unable to get first event timestamp for run %s',
                        run_name)
        run_data['firstEventTimestamp'] = None
    return http_util.Respond(request, runs, 'application/json')

  @wrappers.Request.application
  def _serve_index(self, request):
    """Serves the index page (i.e., the tensorboard app itself)."""
    return self._serve_static_file(request, '/dist/index.html')

  @wrappers.Request.application
  def _serve_js(self, request):
    """Serves the JavaScript for the index page."""
    return self._serve_static_file(request, '/dist/app.js')

  def _serve_static_file(self, request, path):
    """Serves the static file located at the given path.

    Args:
      request: A werkzeug Request
      path: The path of the static file, relative to the tensorboard/ directory.

    Returns:
      A werkzeug.Response application.
    """
    # Strip off the leading forward slash.
    orig_path = path.lstrip('/')
    if not self._path_is_safe(orig_path):
      logging.warning('path not safe: %s', orig_path)
      return http_util.Respond(request, 'Naughty naughty!', 'text/plain', 400)
    # Resource loader wants a path relative to //WORKSPACE/tensorflow.
    path = os.path.join('tensorboard', orig_path)
    # Open the file and read it.
    try:
      contents = resource_loader.load_resource(path)
    except IOError:
      # For compatibility with latest version of Bazel, we renamed bower
      # packages to use '_' rather than '-' in their package name.
      # This means that the directory structure is changed too.
      # So that all our recursive imports work, we need to modify incoming
      # requests to map onto the new directory structure.
      path = orig_path
      components = path.split('/')
      components[0] = components[0].replace('-', '_')
      path = ('/').join(components)
      # Bazel keeps all the external dependencies in //WORKSPACE/external.
      # and resource loader wants a path relative to //WORKSPACE/tensorflow/.
      path = os.path.join('../external', path)
      try:
        contents = resource_loader.load_resource(path)
      except IOError:
        logging.warning('path %s not found, sending 404', path)
        return http_util.Respond(request, 'Not found', 'text/plain', code=404)
    mimetype, content_encoding = mimetypes.guess_type(path)
    mimetype = mimetype or 'application/octet-stream'
    return http_util.Respond(
        request,
        contents,
        mimetype,
        expires=3600,
        content_encoding=content_encoding)

  def __call__(self, environ, start_response):  # pylint: disable=invalid-name
    """Central entry point for the TensorBoard application.

    This method handles routing to sub-applications. It does simple routing
    using regular expression matching.

    This __call__ method conforms to the WSGI spec, so that instances of this
    class are WSGI applications.

    Args:
      environ: See WSGI spec.
      start_response: See WSGI spec.

    Returns:
      A werkzeug Response.
    """
    request = wrappers.Request(environ)
    parsed_url = urlparse.urlparse(request.path)

    # Remove a trailing slash, if present.
    clean_path = parsed_url.path
    if clean_path.endswith('/'):
      clean_path = clean_path[:-1]
    # pylint: disable=too-many-function-args
    if clean_path in self.data_applications:
      return self.data_applications[clean_path](environ, start_response)
    elif clean_path in TAB_ROUTES:
      return self._serve_index(environ, start_response)
    else:
      # Anything not explicitly routed is treated as a static asset request.
      return self._serve_static_file(request, clean_path)(environ,
                                                          start_response)
    # pylint: enable=too-many-function-args
def parse_event_files_spec(logdir):
  """Parses `logdir` into a map from paths to run group names.

  The events files flag format is a comma-separated list of path
  specifications. A path specification either looks like
  'group_name:/path/to/directory' or '/path/to/directory'; in the latter case,
  the group is unnamed. Group names cannot start with a forward slash:
  /foo:bar/baz will be interpreted as a spec with no name and path
  '/foo:bar/baz'. Globs are not supported.

  Args:
    logdir: A comma-separated list of run specifications.
  Returns:
    A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
    Groups without an explicit name are named after their path. If logdir is
    None, returns an empty dict, which is helpful for testing things that don't
    require any valid runs.
  """
  path_to_run = {}
  if logdir is None:
    return path_to_run
  # Make sure keeping consistent with ParseURI in core/lib/io/path.cc
  uri_re = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*')
  for spec in logdir.split(','):
    run_name = None
    path = spec
    # A spec starting with xyz:// is a URI path spec, never a group spec, and
    # a spec like /foo:bar/baz is a path that merely contains a colon.
    looks_like_uri = uri_re.match(spec) is not None
    if not looks_like_uri and ':' in spec and not spec.startswith('/'):
      # Split at most once so run_name:/path:with/a/colon still works.
      run_name, _, path = spec.partition(':')
    # Only local (non-URI) paths get canonicalized.
    if uri_re.match(path) is None:
      path = os.path.realpath(path)
    path_to_run[path] = run_name
  return path_to_run
def reload_multiplexer(multiplexer, path_to_run):
  """Loads all runs into the multiplexer.

  Args:
    multiplexer: The `EventMultiplexer` to add runs to and reload.
    path_to_run: A dict mapping from paths to run names, where `None` as the run
      name is interpreted as a run name equal to the path.
  """
  began = time.time()
  logging.info('TensorBoard reload process beginning')
  for path, name in six.iteritems(path_to_run):
    multiplexer.AddRunsFromDirectory(path, name)
  logging.info('TensorBoard reload process: Reload the whole Multiplexer')
  multiplexer.Reload()
  elapsed = time.time() - began
  logging.info('TensorBoard done reloading. Load took %0.3f secs', elapsed)
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval):
  """Starts a thread to automatically reload the given multiplexer.

  The thread reloads the multiplexer by calling `reload_multiplexer` every
  `load_interval` seconds, starting immediately.

  Args:
    multiplexer: The `EventMultiplexer` to add runs to and reload.
    path_to_run: A dict mapping from paths to run names, where `None` as the run
      name is interpreted as a run name equal to the path.
    load_interval: How many seconds to wait after one load before starting the
      next load.
  Returns:
    A started `threading.Thread` that reloads the multiplexer.
  """
  # Reload() is deliberately not called synchronously here; doing so would
  # block until every run under AddRunsFromDirectory had finished loading.
  def _reload_loop():
    while True:
      reload_multiplexer(multiplexer, path_to_run)
      time.sleep(load_interval)

  reload_thread = threading.Thread(target=_reload_loop)
  reload_thread.daemon = True
  reload_thread.start()
  return reload_thread
def get_tensorboard_tag():
  """Read the TensorBoard TAG number, and return it or an empty string."""
  return resource_loader.load_resource('tensorboard/TAG').strip()
 | 
	apache-2.0 | -6,497,820,005,484,346,000 | 35.931319 | 80 | 0.673734 | false | 
| 
	slandis/InkCutter | 
	inkcutter/app/bin/device.py | 
	1 | 
	3171 | 
	#!/usr/bin/env python
# InkCutter, Plot HPGL directly from Inkscape.
# device.py
#
# Copyright 2010 Jairus Martin <[email protected]>
# Copyright 2013 Shaun Landis <[email protected]>
#       
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import serial
from lxml import etree
import os
if os.name != 'nt':
	import cups
class Device:
	"""Represents an output device (serial plotter or CUPS/USB printer).

	Recognized config keys (all optional): 'width', 'length', 'name',
	'interface' ('serial', 'printer' or 'usb printer'), and 'serial'
	(a dict with 'port' and 'baud').
	"""
	def __init__(self, config=None):
		# NOTE: default is None instead of the original mutable {} default;
		# behavior for existing callers is unchanged.
		#self.xml = etree.parse(filename).getroot()
		conf = {'width': 0, 'length': 0, 'name': '', 'interface': 'serial',
				'serial': {'port': '/dev/ttyUSB0', 'baud': 9600}}
		conf.update(config or {})
		self.width = conf['width']
		self.length = conf['length']
		self.name = conf['name']
		self.interface = conf['interface']
		self.serial = conf['serial']

	def getPrinters(self):
		"""Cache the CUPS printer table on self.printers (non-Windows only)."""
		con = cups.Connection()
		self.printers = con.getPrinters()

	def save(self, id, attribs):
		"""Persist interface settings for device `id` into the XML tree.

		`attribs` is a dict of attribute name -> value written onto the
		device's <interface> element.
		"""
		dev = self.xml.find('device[@id="%s"]' % id)
		err = []
		# BUGFIX: find() returns None when the device does not exist yet;
		# the original called len(None) and crashed.
		if dev is not None and len(dev):
			# delete stale child if the device already exists
			del dev[0]
		else:
			dev = etree.SubElement(self.xml, 'device')
			dev.set('id', id)
		# BUGFIX: was etree.SubElement(d, ...) -- 'd' was never defined.
		iface = etree.SubElement(dev, "interface")
		for key, value in attribs.items():
			iface.set(key, value)

	def plot(self, filename):
		"""Send the HPGL file at `filename` to the configured interface.

		Raises AssertionError for an unknown interface type.
		"""
		def toSerial(data, settings):
			# Write `data` out a serial port; returns True on success.
			assert type(data) == str, "input data must be a str type"
			import serial
			# default settings ('cfg' avoids shadowing the builtin set())
			cfg = {'baud': 9600}
			cfg.update(settings)
			# create serial port and apply settings
			ser = serial.Serial()
			ser.baudrate = cfg['baud']
			ser.port = cfg['port']
			ser.open()
			if ser.isOpen():
				# send data & close
				ser.write(data)
				ser.close()
				return True
			else:
				return False

		def toPrinter(data, printer):
			# Pipe `data` to the named printer via lpr.
			assert type(data) == str, "input data must be a str type"
			assert type(printer) == str, "printer name must be a string"
			proc = os.popen('lpr -P %s' % (printer), 'w')
			proc.write(data)
			proc.close()
			return True

		def toUSBPrinter(data, printer):
			# Write `data` directly to a printer device node.
			assert type(data) == str, "input data must be a str type"
			assert type(printer) == str, "printer name must be a string"
			p = open(printer, 'w+')
			p.write(data)
			p.close()
			return True

		f = open(filename, 'r')
		if self.interface == 'printer':
			toPrinter(f.read(), self.name)
		elif self.interface == 'usb printer':
			toUSBPrinter(f.read(), self.name)
		elif self.interface == 'serial':
			toSerial(f.read(), self.serial)
		else:
			raise AssertionError('Invalid interface type, only printers and serial connections are supported.')
		
	
 | 
	gpl-3.0 | 8,242,678,511,675,439,000 | 26.336207 | 107 | 0.674866 | false | 
| 
	ecederstrand/django | 
	tests/generic_relations_regress/tests.py | 
	300 | 
	11453 | 
	from django.db.models import Q, Sum
from django.db.models.deletion import ProtectedError
from django.db.utils import IntegrityError
from django.forms.models import modelform_factory
from django.test import TestCase, skipIfDBFeature
from .models import (
    A, B, C, D, Address, Board, CharLink, Company, Contact, Content, Developer,
    Guild, HasLinkThing, Link, Node, Note, OddRelation1, OddRelation2,
    Organization, Person, Place, Related, Restaurant, Tag, Team, TextLink,
)
class GenericRelationTests(TestCase):
    """Regression tests for generic relations (GenericForeignKey and
    GenericRelation); most cases correspond to specific Trac tickets."""
    def test_inherited_models_content_type(self):
        """
        Test that GenericRelations on inherited classes use the correct content
        type.
        """
        p = Place.objects.create(name="South Park")
        r = Restaurant.objects.create(name="Chubby's")
        l1 = Link.objects.create(content_object=p)
        l2 = Link.objects.create(content_object=r)
        self.assertEqual(list(p.links.all()), [l1])
        self.assertEqual(list(r.links.all()), [l2])
    def test_reverse_relation_pk(self):
        """
        Test that the correct column name is used for the primary key on the
        originating model of a query.  See #12664.
        """
        p = Person.objects.create(account=23, name='Chef')
        Address.objects.create(street='123 Anywhere Place',
                               city='Conifer', state='CO',
                               zipcode='80433', content_object=p)
        qs = Person.objects.filter(addresses__zipcode='80433')
        self.assertEqual(1, qs.count())
        self.assertEqual('Chef', qs[0].name)
    def test_charlink_delete(self):
        # Deleting the target of a generic relation keyed by a CharField
        # primary key must not raise.
        oddrel = OddRelation1.objects.create(name='clink')
        CharLink.objects.create(content_object=oddrel)
        oddrel.delete()
    def test_textlink_delete(self):
        # Same as test_charlink_delete, for a TextField primary key.
        oddrel = OddRelation2.objects.create(name='tlink')
        TextLink.objects.create(content_object=oddrel)
        oddrel.delete()
    def test_q_object_or(self):
        """
        Tests that SQL query parameters for generic relations are properly
        grouped when OR is used.
        Test for bug http://code.djangoproject.com/ticket/11535
        In this bug the first query (below) works while the second, with the
        query parameters the same but in reverse order, does not.
        The issue is that the generic relation conditions do not get properly
        grouped in parentheses.
        """
        note_contact = Contact.objects.create()
        org_contact = Contact.objects.create()
        Note.objects.create(note='note', content_object=note_contact)
        org = Organization.objects.create(name='org name')
        org.contacts.add(org_contact)
        # search with a non-matching note and a matching org name
        qs = Contact.objects.filter(Q(notes__note__icontains=r'other note') |
                                    Q(organizations__name__icontains=r'org name'))
        self.assertIn(org_contact, qs)
        # search again, with the same query parameters, in reverse order
        qs = Contact.objects.filter(
            Q(organizations__name__icontains=r'org name') |
            Q(notes__note__icontains=r'other note'))
        self.assertIn(org_contact, qs)
    def test_join_reuse(self):
        # Chained filter() calls on the same generic relation must produce
        # two separate JOINs, not incorrectly reuse one.
        qs = Person.objects.filter(
            addresses__street='foo'
        ).filter(
            addresses__street='bar'
        )
        self.assertEqual(str(qs.query).count('JOIN'), 2)
    def test_generic_relation_ordering(self):
        """
        Test that ordering over a generic relation does not include extraneous
        duplicate results, nor excludes rows not participating in the relation.
        """
        p1 = Place.objects.create(name="South Park")
        p2 = Place.objects.create(name="The City")
        c = Company.objects.create(name="Chubby's Intl.")
        Link.objects.create(content_object=p1)
        Link.objects.create(content_object=c)
        places = list(Place.objects.order_by('links__id'))
        def count_places(place):
            return len([p for p in places if p.id == place.id])
        self.assertEqual(len(places), 2)
        self.assertEqual(count_places(p1), 1)
        self.assertEqual(count_places(p2), 1)
    def test_target_model_is_unsaved(self):
        """Test related to #13085"""
        # Fails with another, ORM-level error
        dev1 = Developer(name='Joe')
        note = Note(note='Deserves promotion', content_object=dev1)
        self.assertRaises(IntegrityError, note.save)
    def test_target_model_len_zero(self):
        """Test for #13085 -- __len__() returns 0"""
        team1 = Team.objects.create(name='Backend devs')
        try:
            note = Note(note='Deserve a bonus', content_object=team1)
        except Exception as e:
            if (issubclass(type(e), Exception) and
                    str(e) == 'Impossible arguments to GFK.get_content_type!'):
                self.fail("Saving model with GenericForeignKey to model instance whose "
                          "__len__ method returns 0 shouldn't fail.")
            raise e
        note.save()
    def test_target_model_nonzero_false(self):
        """Test related to #13085"""
        # __nonzero__() returns False -- This actually doesn't currently fail.
        # This test validates that
        g1 = Guild.objects.create(name='First guild')
        note = Note(note='Note for guild', content_object=g1)
        note.save()
    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_gfk_to_model_with_empty_pk(self):
        """Test related to #13085"""
        # Saving model with GenericForeignKey to model instance with an
        # empty CharField PK
        b1 = Board.objects.create(name='')
        tag = Tag(label='VP', content_object=b1)
        tag.save()
    def test_ticket_20378(self):
        # Create a couple of extra HasLinkThing so that the autopk value
        # isn't the same for Link and HasLinkThing.
        hs1 = HasLinkThing.objects.create()
        hs2 = HasLinkThing.objects.create()
        hs3 = HasLinkThing.objects.create()
        hs4 = HasLinkThing.objects.create()
        l1 = Link.objects.create(content_object=hs3)
        l2 = Link.objects.create(content_object=hs4)
        self.assertQuerysetEqual(
            HasLinkThing.objects.filter(links=l1),
            [hs3], lambda x: x)
        self.assertQuerysetEqual(
            HasLinkThing.objects.filter(links=l2),
            [hs4], lambda x: x)
        self.assertQuerysetEqual(
            HasLinkThing.objects.exclude(links=l2),
            [hs1, hs2, hs3], lambda x: x, ordered=False)
        self.assertQuerysetEqual(
            HasLinkThing.objects.exclude(links=l1),
            [hs1, hs2, hs4], lambda x: x, ordered=False)
    def test_ticket_20564(self):
        # filter()/exclude() through a generic relation with a NULLable
        # flag field must treat missing related rows like NULL.
        b1 = B.objects.create()
        b2 = B.objects.create()
        b3 = B.objects.create()
        c1 = C.objects.create(b=b1)
        c2 = C.objects.create(b=b2)
        c3 = C.objects.create(b=b3)
        A.objects.create(flag=None, content_object=b1)
        A.objects.create(flag=True, content_object=b2)
        self.assertQuerysetEqual(
            C.objects.filter(b__a__flag=None),
            [c1, c3], lambda x: x
        )
        self.assertQuerysetEqual(
            C.objects.exclude(b__a__flag=None),
            [c2], lambda x: x
        )
    def test_ticket_20564_nullable_fk(self):
        # Same as test_ticket_20564, but reached through a nullable FK (D.b).
        b1 = B.objects.create()
        b2 = B.objects.create()
        b3 = B.objects.create()
        d1 = D.objects.create(b=b1)
        d2 = D.objects.create(b=b2)
        d3 = D.objects.create(b=b3)
        d4 = D.objects.create()
        A.objects.create(flag=None, content_object=b1)
        A.objects.create(flag=True, content_object=b1)
        A.objects.create(flag=True, content_object=b2)
        self.assertQuerysetEqual(
            D.objects.exclude(b__a__flag=None),
            [d2], lambda x: x
        )
        self.assertQuerysetEqual(
            D.objects.filter(b__a__flag=None),
            [d1, d3, d4], lambda x: x
        )
        self.assertQuerysetEqual(
            B.objects.filter(a__flag=None),
            [b1, b3], lambda x: x
        )
        self.assertQuerysetEqual(
            B.objects.exclude(a__flag=None),
            [b2], lambda x: x
        )
    def test_extra_join_condition(self):
        # A crude check that content_type_id is taken in account in the
        # join/subquery condition.
        self.assertIn("content_type_id", str(B.objects.exclude(a__flag=None).query).lower())
        # No need for any joins - the join from inner query can be trimmed in
        # this case (but not in the above case as no a objects at all for given
        # B would then fail).
        self.assertNotIn(" join ", str(B.objects.exclude(a__flag=True).query).lower())
        self.assertIn("content_type_id", str(B.objects.exclude(a__flag=True).query).lower())
    def test_annotate(self):
        # Aggregating over a generic relation must restrict on content type
        # and use a LEFT JOIN so rows without links still appear.
        hs1 = HasLinkThing.objects.create()
        hs2 = HasLinkThing.objects.create()
        HasLinkThing.objects.create()
        b = Board.objects.create(name=str(hs1.pk))
        Link.objects.create(content_object=hs2)
        l = Link.objects.create(content_object=hs1)
        Link.objects.create(content_object=b)
        qs = HasLinkThing.objects.annotate(Sum('links')).filter(pk=hs1.pk)
        # If content_type restriction isn't in the query's join condition,
        # then wrong results are produced here as the link to b will also match
        # (b and hs1 have equal pks).
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].links__sum, l.id)
        l.delete()
        # Now if we don't have proper left join, we will not produce any
        # results at all here.
        # clear cached results
        qs = qs.all()
        self.assertEqual(qs.count(), 1)
        # Note - 0 here would be a nicer result...
        self.assertIs(qs[0].links__sum, None)
        # Finally test that filtering works.
        self.assertEqual(qs.filter(links__sum__isnull=True).count(), 1)
        self.assertEqual(qs.filter(links__sum__isnull=False).count(), 0)
    def test_filter_targets_related_pk(self):
        # Filtering by a Link instance's pk must match on object_id even
        # though the Link's own pk differs from it.
        HasLinkThing.objects.create()
        hs2 = HasLinkThing.objects.create()
        l = Link.objects.create(content_object=hs2)
        self.assertNotEqual(l.object_id, l.pk)
        self.assertQuerysetEqual(
            HasLinkThing.objects.filter(links=l.pk),
            [hs2], lambda x: x)
    def test_editable_generic_rel(self):
        # An editable GenericRelation must be exposed by modelform_factory
        # and saved exactly once via save_form_data.
        GenericRelationForm = modelform_factory(HasLinkThing, fields='__all__')
        form = GenericRelationForm()
        self.assertIn('links', form.fields)
        form = GenericRelationForm({'links': None})
        self.assertTrue(form.is_valid())
        form.save()
        links = HasLinkThing._meta.get_field('links')
        self.assertEqual(links.save_form_data_calls, 1)
    def test_ticket_22998(self):
        related = Related.objects.create()
        content = Content.objects.create(related_obj=related)
        Node.objects.create(content=content)
        # deleting the Related cascades to the Content cascades to the Node,
        # where the pre_delete signal should fire and prevent deletion.
        with self.assertRaises(ProtectedError):
            related.delete()
    def test_ticket_22982(self):
        # The repr of an unevaluated generic related manager should name
        # the manager class, not evaluate the queryset.
        place = Place.objects.create(name='My Place')
        self.assertIn('GenericRelatedObjectManager', str(place.links))
 | 
	bsd-3-clause | 1,001,468,639,439,804,000 | 39.758007 | 92 | 0.615647 | false | 
| 
	HackerTool/vivisect | 
	vstruct/defs/pcap.py | 
	2 | 
	16024 | 
	
import struct

import vstruct
import vstruct.defs.inet as vs_inet
from vstruct.primitives import *
# Link-layer header types (classic libpcap 'network' field values).
PCAP_LINKTYPE_ETHER     = 1
PCAP_LINKTYPE_RAW       = 101
# Byte-order magic stored in the pcapng Section Header Block.
PCAPNG_BOM              = 0x1A2B3C4D
# Option codes common to all pcapng blocks.
OPT_ENDOFOPT            = 0
OPT_COMMENT             = 1
#PCAPNG_BLOCKTYPE_SECTION_HEADER options
OPT_SHB_HARDWARE        = 2
OPT_SHB_OS              = 3
OPT_SHB_USERAPPL        = 4
#PCAPNG_INTERFACE_DESCRIPTION_BLOCK options
OPT_IF_NAME             = 2
OPT_IF_DESCRIPTION      = 3
OPT_IF_IPV4ADDR         = 4
OPT_IF_IPV6ADDR         = 5
OPT_IF_MACADDR          = 6
OPT_IF_EUIADDR          = 7
OPT_IF_SPEED            = 8
OPT_IF_TSRESOL          = 9
OPT_IF_TZONE            = 10
OPT_IF_FILTER           = 11
OPT_IF_OS               = 12
OPT_IF_FCSLEN           = 13
OPT_IF_TSOFFSET         = 14
# options for PCAPNG_ENHANCED_PACKET_BLOCK
OPT_EPB_FLAGS           = 2
OPT_EPB_HASH            = 3
OPT_EPB_DROPCOUNT       = 4
# values used in the blocktype field
PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION      = 0x00000001
PCAPNG_BLOCKTYPE_PACKET                     = 0x00000002
PCAPNG_BLOCKTYPE_SIMPLE_PACKET              = 0x00000003
PCAPNG_BLOCKTYPE_NAME_RESOLUTION            = 0x00000004
PCAPNG_BLOCKTYPE_INTERFACE_STATS            = 0x00000005
PCAPNG_BLOCKTYPE_ENHANCED_PACKET            = 0x00000006
PCAPNG_BLOCKTYPE_SECTION_HEADER             = 0x0a0d0d0a
def pad4bytes(size):
    '''
    Round size up to the next multiple of four (pcapng blocks and options
    are 32-bit aligned).
    '''
    remainder = size % 4
    if remainder:
        return size + (4 - remainder)
    return size
class PCAP_FILE_HEADER(vstruct.VStruct):
    # Classic libpcap (tcpdump) global file header. Field order mirrors the
    # on-disk layout and must not be changed.
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.magic      = v_uint32()  # file magic number
        self.vers_maj   = v_uint16()  # major file format version
        self.vers_min   = v_uint16()  # minor file format version
        self.thiszone   = v_uint32()  # timezone correction field
        self.sigfigs    = v_uint32()  # timestamp accuracy field
        self.snaplen    = v_uint32()  # max captured bytes per packet
        self.linktype   = v_uint32()  # link-layer type (PCAP_LINKTYPE_*)
class PCAP_PACKET_HEADER(vstruct.VStruct):
    # Per-packet record header in a classic libpcap capture.
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.tvsec      = v_uint32()  # timestamp, seconds part
        self.tvusec     = v_uint32()  # timestamp, sub-second part
        self.caplen     = v_uint32()  # bytes actually captured in this record
        self.len        = v_uint32()  # original packet length on the wire
class PCAPNG_GENERIC_BLOCK_HEADER(vstruct.VStruct):
    '''
    Used to read the block type & size when parsing the file
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.blocktype      = v_uint32(bigend=bigend)  # PCAPNG_BLOCKTYPE_* value
        self.blocksize      = v_uint32(bigend=bigend)  # total block length in bytes
class PCAPNG_BLOCK_PARENT(vstruct.VStruct):
    '''
    Used to inherit the weird parsing style where there's variable length
    options at the end, followed by the duplicate block total length
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        #non-vstruct field, set during checking BOM
        self.bigend         = False
    def vsParse(self, bytez, offset=0):
        # Parse the fixed fields, then any trailing options, then the
        # duplicated trailing block length. Returns the (4-byte padded)
        # offset just past this block.
        startoff = offset
        roff = vstruct.VStruct.vsParse(self, bytez, offset=offset)
        #(blocksize-4): because we still need the trailing blocksize2
        # apparently blocks can completely omit the options list and not
        # even have the OPT_ENDOFOPT entry
        while (roff < len(bytez)) and ((roff-startoff) < (self.blocksize-4)):
            opt = PCAPNG_OPTION(bigend=self.bigend)
            roff = opt.vsParse(bytez, roff)
            if opt.code == OPT_ENDOFOPT:
                break
            self.options.vsAddElement(opt)
        # append trailing blocksize2
        bs2 = v_uint32(bigend=self.bigend)
        self.vsAddField('blocksize2', bs2)
        roff = bs2.vsParse(bytez, roff)
        #pad, plus we skip
        return pad4bytes(roff)
class PCAPNG_SECTION_HEADER_BLOCK(PCAPNG_BLOCK_PARENT):
    # pcapng Section Header Block; detects capture endianness via the BOM.
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype      = v_uint32(bigend=bigend)
        self.blocksize      = v_uint32(bigend=bigend)
        self.bom            = v_uint32(bigend=bigend)  # byte-order magic (PCAPNG_BOM)
        self.vers_maj       = v_uint16(bigend=bigend)
        self.vers_min       = v_uint16(bigend=bigend)
        self.sectionsize    = v_uint64(bigend=bigend)
        self.options        = vstruct.VArray([])
        #blocksize2: dynamcally added in vsParse()
        #self.blocksize2     = v_uint32(bigend=bigend)
    def pcb_bom(self):
        # vstruct parse callback: fires once the bom field has been read.
        bom = self.vsGetField('bom')
        if  self.bom == PCAPNG_BOM:
            #if it matches, then the endian of bom is correct
            self.bigend = bom._vs_bigend
        else:
            self.bigend = not bom._vs_bigend
class PCAPNG_OPTION(vstruct.VStruct):
    # A single pcapng option record: code, length, then padded value bytes.
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.code           = v_uint16(bigend=bigend)  # OPT_* option code
        self.optsize        = v_uint16(bigend=bigend)  # unpadded value length
        self.bytes          = v_bytes(0)               # value, padded to 4 bytes
    def pcb_optsize(self):
        # vstruct parse callback: size the value buffer once optsize is read.
        size = pad4bytes(self.optsize)
        self.vsGetField('bytes').vsSetLength(size)
class PCAPNG_INTERFACE_DESCRIPTION_BLOCK(PCAPNG_BLOCK_PARENT):
    # pcapng Interface Description Block; also extracts the timestamp
    # resolution/offset options needed to convert packet timestamps.
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype      = v_uint32(bigend=bigend)
        self.blocksize      = v_uint32(bigend=bigend)
        self.linktype       = v_uint16(bigend=bigend)  # link-layer type for packets on this interface
        self.reserved       = v_uint16(bigend=bigend)
        self.snaplen        = v_uint32(bigend=bigend)
        self.options        = vstruct.VArray([])
        #blocksize2: dynamcally added in vsParse()
        #self.blocksize2     = v_uint32(bigend=bigend)
    def vsParse(self, bytez, offset=0):
        '''
        We need the tsresol value to adjust timestamp values, so pull it
        out here.

        Sets self.tsresol (raw if_tsresol option byte, or None) and
        self.tsoffset (if_tsoffset option value, default 0).
        '''
        # BUGFIX: honor the caller-supplied offset; this previously passed
        # a hardcoded offset=0 to the parent regardless of the argument.
        ret = PCAPNG_BLOCK_PARENT.vsParse(self, bytez, offset=offset)
        self.tsresol = None
        #default offset is 0
        self.tsoffset = 0
        for i, opt in self.options:
            if opt.code == OPT_IF_TSRESOL:
                # single raw byte; decoded later in setPcapTimestamp()
                self.tsresol = ord(opt.bytes[0])
            elif opt.code == OPT_IF_TSOFFSET:
                fmt = '<Q'
                if self.bigend:
                    fmt = '>Q'
                self.tsoffset = struct.unpack_from(fmt, opt.bytes)[0]
        return ret
class PCAPNG_ENHANCED_PACKET_BLOCK(PCAPNG_BLOCK_PARENT):
    # pcapng Enhanced Packet Block: the normal per-packet record.
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype      = v_uint32(bigend=bigend)
        self.blocksize      = v_uint32(bigend=bigend)
        self.interfaceid    = v_uint32(bigend=bigend)  # index into the section's interface list
        self.tstamphi       = v_uint32(bigend=bigend)  # high 32 bits of the 64-bit timestamp
        self.tstamplow      = v_uint32(bigend=bigend)  # low 32 bits of the 64-bit timestamp
        self.caplen         = v_uint32(bigend=bigend)  # captured byte count
        self.packetlen      = v_uint32(bigend=bigend)  # original packet length
        self.data           = v_bytes(0)
        self.options        = vstruct.VArray([])
        #blocksize2: dynamcally added in vsParse()
        #self.blocksize2     = v_uint32(bigend=bigend)
    def pcb_caplen(self):
        # vstruct parse callback: size the data buffer once caplen is read.
        size = pad4bytes(self.caplen)
        self.vsGetField('data').vsSetLength(size)
    def setPcapTimestamp(self, idb):
        '''
        Adds a libpcap compatible tvsec and tvusec fields, based on the pcapng
        timestamp and the interface's if_tsresol / if_tsoffset options.
        '''
        self.snaplen = idb.snaplen
        tstamp = (self.tstamphi << 32) | self.tstamplow
        scale = 1000000
        if idb.tsresol is None:
            #if not set, capture assumes 10e-6 resolution
            pass
        elif (0x80 & idb.tsresol) == 0:
            # remaining bits are resolution, to a negative power of 10
            scale = 10**(idb.tsresol & 0x7f)
        else:
            # remaining bits are resolution, to a negative power of 2
            scale = 1 << (idb.tsresol & 0x7f)
        # BUGFIX: use floor division so tvsec stays integral on Python 3 as
        # well; // is identical to / for ints on Python 2.
        self.tvsec = (tstamp // scale) + idb.tsoffset
        self.tvusec = tstamp % scale
class PCAPNG_SIMPLE_PACKET_BLOCK(vstruct.VStruct):
    '''
    Note: no variable length options fields, so inheriting from vstruct directly
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.blocktype      = v_uint32(bigend=bigend)
        self.blocksize      = v_uint32(bigend=bigend)
        self.packetlen      = v_uint32(bigend=bigend)  # original packet length
        self.data           = v_bytes(0)
        self.blocksize2     = v_uint32(bigend=bigend)  # duplicate trailing block length
    def pcb_blocksize(self):
        # vstruct parse callback: data length = blocksize minus the four
        # fixed 4-byte fields (16 bytes), padded to 32 bits.
        self.caplen = pad4bytes(self.blocksize - 16)
        self.vsGetField('data').vsSetLength(self.caplen)
    def setPcapTimestamp(self, idb):
        #no timestamp in this type of block :(
        self.tvsec = idb.tsoffset
        self.tvusec = 0
def iterPcapFileName(filename, reuse=False):
    '''
    Open the capture at `filename` and yield parsed packet tuples from it.
    See iterPcapFile() for the yielded tuple shape and `reuse` semantics.
    '''
    # BUGFIX: use open() in binary mode -- the file() builtin does not exist
    # on Python 3, and captures are binary data.
    fd = open(filename, 'rb')
    for x in iterPcapFile(fd, reuse=reuse):
        yield x
def iterPcapFile(fd, reuse=False):
    '''
    Figure out if it's a tcpdump format, or pcapng
    '''
    probe = PCAP_FILE_HEADER()
    raw = fd.read(len(probe))
    probe.vsParse(raw, fast=True)
    # Rewind so the chosen parser re-reads the header itself.
    fd.seek(0)
    if probe.magic == PCAPNG_BLOCKTYPE_SECTION_HEADER:
        return _iterPcapNgFile(fd, reuse)
    return _iterPcapFile(fd, reuse)
def _iterPcapFile(fd, reuse=False):
    # Iterate a classic libpcap capture, yielding a
    # (pkt_header, ipv4, transport_hdr, payload_bytes) tuple for each
    # parseable TCP/UDP/ICMP packet. Non-IP and truncated packets are
    # silently skipped.
    # When reuse=True the same header objects are re-yielded each
    # iteration (faster, but callers must not hold references).
    h = PCAP_FILE_HEADER()
    b = fd.read(len(h))
    h.vsParse(b, fast=True)
    linktype = h.linktype
    if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
        raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
    pkt = PCAP_PACKET_HEADER()
    eII = vs_inet.ETHERII()
    pktsize = len(pkt)
    eIIsize = len(eII)
    ipv4 = vs_inet.IPv4()
    ipv4size = 20
    tcp_hdr = vs_inet.TCP()
    udp_hdr = vs_inet.UDP()
    icmp_hdr = vs_inet.ICMP()
    go = True
    while go:
        hdr = fd.read(pktsize)
        if len(hdr) != pktsize:
            # short read: end of file
            break
        pkt.vsParse(hdr, fast=True)
        b = fd.read(pkt.caplen)
        offset = 0
        if linktype == PCAP_LINKTYPE_ETHER:
            if len(b) < eIIsize:
                continue
            eII.vsParse(b, 0, fast=True)
            # No support for non-ip protocol yet...
            if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
                continue
            offset += eIIsize
            if eII.etype == vs_inet.ETH_P_VLAN:
                # skip the 4-byte 802.1Q VLAN tag
                offset +=  4
        elif linktype == PCAP_LINKTYPE_RAW:
            # raw IP: datagram starts at offset 0
            pass
        #print eII.tree()
        if not reuse:
            ipv4 = vs_inet.IPv4()
        if (len(b) - offset) < ipv4size:
            continue
        ipv4.vsParse(b, offset, fast=True)
        # Make b *only* the IP datagram bytes...
        b = b[offset:offset+ipv4.totlen]
        offset = 0
        offset += len(ipv4)
        tsize = len(b) - offset
        if ipv4.proto == vs_inet.IPPROTO_TCP:
            if tsize < 20:
                continue
            if not reuse:
                tcp_hdr = vs_inet.TCP()
            tcp_hdr.vsParse(b, offset, fast=True)
            offset += len(tcp_hdr)
            pdata = b[offset:]
            yield pkt,ipv4,tcp_hdr,pdata
        elif ipv4.proto == vs_inet.IPPROTO_UDP:
            if tsize < 8:
                continue
            if not reuse:
                udp_hdr = vs_inet.UDP()
            udp_hdr.vsParse(b, offset, fast=True)
            offset += len(udp_hdr)
            pdata = b[offset:]
            yield pkt,ipv4,udp_hdr,pdata
        elif ipv4.proto == vs_inet.IPPROTO_ICMP:
            if tsize < 4:
                continue
            if not reuse:
                icmp_hdr = vs_inet.ICMP()
            icmp_hdr.vsParse(b, offset, fast=True)
            offset += len(icmp_hdr)
            pdata = b[offset:]
            yield pkt,ipv4,icmp_hdr,pdata
        else:
            pass
            #print 'UNHANDLED IP PROTOCOL: %d' % ipv4.proto
def _iterPcapNgFile(fd, reuse=False):
    # Iterate a pcapng capture, yielding a
    # (pkt_block, ipv4, transport_hdr, payload_bytes) tuple for each
    # parseable TCP/UDP/ICMP packet (see _parsePcapngPacketBytes).
    header = PCAPNG_GENERIC_BLOCK_HEADER()
    ifaceidx = 0
    ifacedict = {}
    roff = 0
    bigend = False
    curroff = fd.tell()
    b0 = fd.read(len(header))
    fd.seek(curroff)
    while len(b0) == len(header):
        header.vsParse(b0, fast=True)
        body = fd.read(header.blocksize)
        if header.blocktype == PCAPNG_BLOCKTYPE_SECTION_HEADER:
            shb = PCAPNG_SECTION_HEADER_BLOCK()
            roff = shb.vsParse(body)
            bigend = shb.bigend
            #reset interface stuff since we're in a new section
            ifaceidx = 0
            ifacedict = {}
        elif header.blocktype == PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION:
            idb = PCAPNG_INTERFACE_DESCRIPTION_BLOCK(bigend)
            roff = idb.vsParse(body)
            #save off the interface for later reference
            ifacedict[ifaceidx] = idb
            ifaceidx += 1
        elif header.blocktype == PCAPNG_BLOCKTYPE_SIMPLE_PACKET:
            spb = PCAPNG_SIMPLE_PACKET_BLOCK(bigend)
            roff = spb.vsParse(body)
            # BUGFIX: 'iface' was read here without ever being assigned in
            # this branch (NameError unless an enhanced packet block had
            # already been seen). A Simple Packet Block always belongs to
            # interface 0 per the pcapng spec.
            iface = ifacedict.get(0)
            spb.setPcapTimestamp(iface)
            tup = _parsePcapngPacketBytes(iface.linktype, spb)
            if tup is not None:
                #if it is None, just fall through & read next block
                yield tup
        elif header.blocktype == PCAPNG_BLOCKTYPE_ENHANCED_PACKET:
            epb = PCAPNG_ENHANCED_PACKET_BLOCK(bigend)
            roff = epb.vsParse(body)
            iface = ifacedict.get(epb.interfaceid)
            epb.setPcapTimestamp(iface)
            tup = _parsePcapngPacketBytes(iface.linktype, epb)
            if tup is not None:
                #if tup is None, just fall through & read next block
                yield tup
        #TODO: other blocks needed?
        #PCAPNG_BLOCKTYPE_PACKET (obsolete)
        #PCAPNG_BLOCKTYPE_NAME_RESOLUTION:
        #PCAPNG_BLOCKTYPE_INTERFACE_STATS:
        else:
            # unknown block type: skip it
            pass
        curroff = fd.tell()
        b0 = fd.read(len(header))
        fd.seek(curroff)
def _parsePcapngPacketBytes(linktype, pkt):
    '''
    Parse the link-layer / IP / transport headers out of one captured packet.

    pkt is a parsed PCAPNG_SIMPLE_PACKET_BLOCK or PCAPNG_ENHANCED_PACKET_BLOCK
    whose .data attribute holds the raw frame bytes.

    On success returns the tuple (pcapng_pkt, ipv4_vstruct, transport_vstruct,
    pdata) where pdata is the transport-layer payload bytes.
    Returns None if the packet can't be parsed (truncated frame, non-IP
    ethertype, or an unhandled IP protocol).
    Raises Exception for link types other than Ethernet and raw IP.
    '''
    if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
        raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
    #pkt = PCAP_PACKET_HEADER()
    eII = vs_inet.ETHERII()
    eIIsize = len(eII)
    offset = 0
    if linktype == PCAP_LINKTYPE_ETHER:
        # Frame must at least hold a full Ethernet II header.
        if len(pkt.data) < eIIsize:
            return None
        eII.vsParse(pkt.data, 0, fast=True)
        # No support for non-ip protocol yet...
        if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
            return None
        offset += eIIsize
        if eII.etype == vs_inet.ETH_P_VLAN:
            # Skip one 4-byte 802.1Q tag.  NOTE(review): assumes a single
            # VLAN tag and does not re-check the inner ethertype — confirm.
            offset +=  4
    elif linktype == PCAP_LINKTYPE_RAW:
        # Raw capture: pkt.data starts directly at the IP header.
        pass
    ipv4 = vs_inet.IPv4()
    if (len(pkt.data) - offset) < len(ipv4):
        return None
    ipv4.vsParse(pkt.data, offset, fast=True)
    # Make b *only* the IP datagram bytes (totlen covers header + payload),
    # so trailing link-layer padding is discarded.
    b = pkt.data[offset:offset+ipv4.totlen]
    offset = 0
    offset += len(ipv4)
    # Bytes remaining after the IP header: transport header + payload.
    tsize = len(b) - offset
    if ipv4.proto == vs_inet.IPPROTO_TCP:
        if tsize < 20:          # minimum TCP header size
            return None
        tcp_hdr = vs_inet.TCP()
        tcp_hdr.vsParse(b, offset, fast=True)
        offset += len(tcp_hdr)
        pdata = b[offset:]
        return pkt,ipv4,tcp_hdr,pdata
    elif ipv4.proto == vs_inet.IPPROTO_UDP:
        if tsize < 8:           # UDP header is always 8 bytes
            return None
        udp_hdr = vs_inet.UDP()
        udp_hdr.vsParse(b, offset, fast=True)
        offset += len(udp_hdr)
        pdata = b[offset:]
        return pkt,ipv4,udp_hdr,pdata
    elif ipv4.proto == vs_inet.IPPROTO_ICMP:
        if tsize < 4:           # minimum ICMP header size
            return None
        icmp_hdr = vs_inet.ICMP()
        icmp_hdr.vsParse(b, offset, fast=True)
        offset += len(icmp_hdr)
        pdata = b[offset:]
        return pkt,ipv4,icmp_hdr,pdata
    else:
        pass
        #print 'UNHANDLED IP PROTOCOL: %d' % ipv4.proto
    return None
 | 
	apache-2.0 | -2,379,434,446,897,924,000 | 31.503043 | 113 | 0.569958 | false | 
| 
	HopeFOAM/HopeFOAM | 
	ThirdParty-0.1/ParaView-5.0.1/VTK/ThirdParty/ZopeInterface/zope/interface/tests/test_element.py | 
	79 | 
	1320 | 
	##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test Element meta-class.
"""
import unittest
from zope.interface.interface import Element
class TestElement(unittest.TestCase):
    def test_taggedValues(self):
        """Tagged values set on distinct Elements must not bleed into
        each other."""
        first = Element("foo")
        second = Element("bar")
        first.setTaggedValue("x", 1)
        second.setTaggedValue("x", 2)
        self.assertEqual(first.getTaggedValue("x"), 1)
        self.assertEqual(second.getTaggedValue("x"), 2)
def test_suite():
    """Assemble all of this module's tests into a single suite."""
    return unittest.TestSuite((unittest.makeSuite(TestElement),))
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
 | 
	gpl-3.0 | -6,502,980,762,974,214,000 | 31.195122 | 78 | 0.605303 | false | 
| 
	nemobis/BEIC | 
	METS_fileSec_validator.py | 
	1 | 
	1331 | 
	#!/usr/bin/python
# -*- coding: utf-8  -*-
"""
   Extractor to validate a METS file and check the existence and content of
   the files linked from each fileSec/fileGrp/file/FLocat tag, assumed to
   contain an MD5 checksum. The "md5sum" utility is required.
"""
#
# (C) Federico Leva and Fondazione BEIC, 2018
#
# Distributed under the terms of the MIT license.
#
__version__ = '0.1.0'
from lxml import etree
import os
import subprocess
# http://lxml.de/validation.html
# NOTE(review): this parser is constructed but never passed to etree.parse()
# below, so DTD validation never actually runs — confirm whether it was meant
# to be wired in (etree.parse(xml, parser)).
parser = etree.XMLParser(dtd_validation=True)

# Collect "<md5>  <path>" lines for every file referenced by every METS
# document below the current directory, then let md5sum verify them all.
# The digest file must be closed (and therefore flushed) *before* md5sum
# reads it — the original kept it open, so md5sum could see a partial file.
with open('mets.md5sum', 'w') as digest:
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in [each for each in filenames if each.endswith('.xml')]:
            xml = os.path.join(dirpath, filename)
            try:
                # Pass the path (not an open handle) so lxml manages and
                # closes the file itself — no leaked descriptors.
                mets = etree.parse(xml)
                files = mets.xpath('//*[local-name()="file"]')
                for item in files:
                    content = item.xpath('./*[local-name()="FLocat"]/@xlink:href',
                                         namespaces={"xlink": "http://www.w3.org/1999/xlink"})[0]
                    checksum = item.xpath('./@CHECKSUM')[0]
                    digest.write("%s  %s\n" % (checksum, os.path.normpath(os.path.join(dirpath, content))))
            except Exception:
                # Best-effort: skip files that are not parseable METS
                # documents or that lack the expected attributes.
                pass

check = subprocess.call(["md5sum", "-c", "--status", "mets.md5sum"])
if check == 0:
    print("SUCCESS: The METS content has been verified correctly.")
else:
    print("ERROR: The checksum validation has failed.")
 | 
	mit | 3,287,267,885,847,843,000 | 31.463415 | 93 | 0.670173 | false | 
| 
	PlotWatt/sql_query_dict | 
	test.py | 
	1 | 
	2835 | 
	import pytest
import sql_query_dict
def test_escape_string_with_single_quote():
    # A value containing a single quote gets wrapped in double quotes.
    quoted = sql_query_dict.quote_string("'a")
    assert quoted == '"\'a"'
def test_escape_string_with_double_quote():
    # A value containing a double quote gets wrapped in single quotes.
    quoted = sql_query_dict.quote_string('"a')
    assert quoted == "'\"a'"
def test_escape_string_with_single_and_double_quote():
    # A value holding both quote styles gets the single quote escaped.
    quoted = sql_query_dict.quote_string(""" '" """)
    assert quoted == """' \\\'" '"""
def test_escape_string():
    # Plain strings are simply wrapped in single quotes.
    quoted = sql_query_dict.quote_string('a')
    assert quoted == "'a'"
def test_split_key_compare():
    # 'key|=' splits into the bare key and its comparison operator.
    key, op = sql_query_dict._split_key_compare('key|=')
    assert (key, op) == ('key', '|=')
def test_mysql_list_esc():
    # Integers are joined with commas, unquoted.
    escaped = sql_query_dict.mysql_list_esc([1, 2])
    assert escaped == "1,2"
def test_mysql_list_esc_string_numbers():
    # Numeric strings stay quoted — they are still strings.
    escaped = sql_query_dict.mysql_list_esc(["1", "2"])
    assert escaped == "'1','2'"
def test_mysql_list_esc_string():
    # Ordinary strings are quoted and comma-joined.
    escaped = sql_query_dict.mysql_list_esc(["a", "b"])
    assert escaped == "'a','b'"
def test_mysql_list_with_or_equals():
    # The '|=' operator with a list collapses to an IN clause.
    clause = sql_query_dict._mysql_clause('x|=', [1, 2, 3], '%s')
    assert clause == " (x IN (1,2,3)) "
def test_mysql_list_with_none():
    # None in the list becomes an IS NULL alternative OR'd with the IN.
    clause = sql_query_dict._mysql_clause('x', [None, False], '%s')
    assert clause == ' ((x IS NULL) OR  (x IN (False)) ) '
def test_mysql_list_with_generator():
    # Generators are accepted wherever lists are.
    clause = sql_query_dict._mysql_clause('x', (v for v in [1, 2, 3]), '%s')
    assert clause == " (x IN (1,2,3)) "
def test_parse_tablename():
    # A plain string table name passes through unchanged.
    assert sql_query_dict._parse_tablename('xyz') == 'xyz'
def test_parse_tablename_err():
    # Non-string table names are rejected with TypeError.
    with pytest.raises(TypeError):
        sql_query_dict._parse_tablename(1)
def test_parse_tablename_set():
    # Set iteration order is unspecified, so either join order is valid.
    joined = sql_query_dict._parse_tablename(set(['xyz', 'abc']))
    assert joined in ('xyz,abc', 'abc,xyz')
def test_mysql_with_gt_lt():
    # '><' expands to a pair of strict range comparisons.
    query, params = sql_query_dict.select(
        't', 'z', {'x><': (10, 30), 'y': 1}
    )
    # Clause ordering depends on dict iteration, so accept either permutation.
    assert query in (
        "SELECT z FROM t WHERE  (y = %s)  AND  ((x > %s) AND (x < %s)) ",
        "SELECT z FROM t WHERE  ((x > %s) AND (x < %s))  AND  (y = %s) ",
    )
    assert params in (
        [1, 10, 30],
        [10, 30, 1],
    )
def test_mysql_string_value():
    # Scalar values produce a parameterized equality clause.
    clause = sql_query_dict._mysql_clause('x', 'the', '%s')
    assert clause == " (x = %s) "
def test_mysql_like():
    # The '~' suffix maps to a LIKE comparison.
    clause = sql_query_dict._mysql_clause('x~', 'the %', '%s')
    assert clause == " (x LIKE %s) "
def test_mysql_not_like():
    # The '!~' suffix maps to a NOT LIKE comparison.
    clause = sql_query_dict._mysql_clause('x!~', 'the %', '%s')
    assert clause == " (x NOT LIKE %s) "
def test_mysql_not_in():
    # '!=' with a list negates the IN clause.
    clause = sql_query_dict._mysql_clause('x!=', [1, 2, 3], '%s')
    assert clause == " (x NOT IN (1,2,3)) "
def test_mysql_list_compare_with_none():
    # None inside a negated list adds an IS NOT NULL conjunct...
    negated = sql_query_dict._mysql_clause('x!=', [None, 1, 2, 3], '%s')
    assert negated == " ((x IS NOT NULL) AND  (x NOT IN (1,2,3)) ) "
    # ...while None inside a plain list adds an IS NULL alternative.
    plain = sql_query_dict._mysql_clause('x', [None, 1, 2, 3], '%s')
    assert plain == " ((x IS NULL) OR  (x IN (1,2,3)) ) "
 | 
	apache-2.0 | 1,021,588,993,969,977,700 | 24.772727 | 73 | 0.53933 | false | 
| 
	wizzomafizzo/flairbot | 
	flairbot.py | 
	1 | 
	5950 | 
	#!/usr/bin/env python3
"""Reddit bot for updating user flairs via PM requests"""
import sys
import re
import os
import time
import logging
import logging.handlers
import praw
import OAuth2Util
from config import cfg
def setup_logging():
    """Configure the root logger with a rotating log file plus console
    output; verbosity is driven by cfg["debug"]."""
    level = logging.DEBUG if cfg["debug"] else logging.INFO

    root = logging.getLogger()
    root.setLevel(level)

    file_handler = logging.handlers.RotatingFileHandler(
        filename=cfg["log_file"], maxBytes=1024*1000, backupCount=5)
    file_handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)-8s %(message)s"))
    root.addHandler(file_handler)

    console = logging.StreamHandler()
    console.setLevel(level)
    console.setFormatter(logging.Formatter("%(levelname)-8s %(message)s"))
    root.addHandler(console)
def parse_wiki_flairs(content, pattern=None):
    """Extract the available flair names from the wiki page body.

    Args:
        content: full markdown text of the wiki page.
        pattern: optional regex with at least one capture group; the first
            group of each matching line is collected.  Defaults to
            cfg["wiki_format"] (backward compatible with the one-argument
            call used elsewhere in this module).

    Returns:
        List of flair names in the order their lines appear.
    """
    if pattern is None:
        pattern = cfg["wiki_format"]
    regex = re.compile(pattern)
    matches = []
    for line in content.splitlines():
        match = regex.match(line)
        if match is not None:
            # First capture group holds the flair name.
            matches.append(match.groups()[0])
    return matches
class FlairBot:
    # Reddit bot that services flair-change requests sent via private
    # message, validating each request against a wiki page of allowed flairs.
    def __init__(self):
        # Build the praw session and OAuth helper, then log in immediately.
        user_agent = cfg["user_agent"] % (cfg["version"],
                                          cfg["subreddit"])
        self.r = praw.Reddit(user_agent=user_agent)
        self.o = OAuth2Util.OAuth2Util(self.r)
        self.processed = 0  # count of successfully changed flairs this run
        self.flairs = []    # allowed flair names; populated in run()
        self.login()
    def login(self):
        """Start a new reddit session"""
        logging.info("Logging in...")
        try:
            self.o.refresh()
        except:
            # Login failure is fatal for the bot.
            # NOTE(review): bare except also catches KeyboardInterrupt —
            # consider narrowing.
            logging.exception("Login failed")
            sys.exit(1)
    def get_requests(self):
        """Fetch and return all new PMs matching configured subject.

        Returns the matching messages oldest-first, or None (implicit)
        when fetching fails; callers must handle the None case.
        """
        logging.info("Fetching new messages...")
        pending = []
        try:
            msgs = self.r.get_unread(limit=None)
        except:
            logging.exception("Failed to get new messages")
            return
        for msg in msgs:
            logging.debug(msg)
            if str(msg.subject) == cfg["subject"]:
                pending.append(msg)
            if not cfg["limit_read"]:
                # Mark everything read now; with limit_read enabled only
                # processed requests are marked read (in process_request).
                msg.mark_as_read()
        # Oldest first, so requests are honoured in arrival order.
        pending.reverse()
        logging.info("Got %i new requests", len(pending))
        return pending
    def process_request(self, subreddit, msg):
        """Read flair request message and set it if possible.

        The message body is the requested flair; it must appear in
        self.flairs (parsed from the wiki page) to be applied.  The user
        is messaged about success or failure either way.
        """
        user = str(msg.author)
        flair = str(msg.body)
        if user in cfg["blacklist"]:
            logging.warning("Skipping blacklisted user: %s", user)
            return
        if flair in self.flairs:
            try:
                # NOTE(review): flair text is set empty and the requested
                # value is passed as the css class — confirm this matches
                # the praw set_flair(user, text, css_class) signature intent.
                subreddit.set_flair(user, "", flair)
            except:
                logging.exception("Error setting flair to %s for %s",
                                  flair, user)
                return
            self.processed += 1
            logging.info("Flair changed to %s for %s", flair, user)
            try:
                self.r.send_message(user,
                                    cfg["msg_subject"],
                                    cfg["msg_success"] % (flair))
            except:
                logging.exception("Error messaging user")
        else:
            logging.warning("Flair %s requested by %s doesn't exist",
                            flair, user)
            # Point the user at the wiki page listing the valid flairs.
            wiki = "https://www.reddit.com/r/%s/wiki/%s" % (cfg["subreddit"],
                                                            cfg["wiki_page"])
            try:
                self.r.send_message(user,
                                    cfg["msg_subject"],
                                    cfg["msg_failure"] % (flair, wiki))
            except:
                logging.exception("Error messaging user")
        if cfg["limit_read"]:
            msg.mark_as_read()
    def get_wiki_page(self, subreddit):
        """Return the flair wiki page markdown, via a local file cache.

        The cache is considered fresh for cfg["cache_time"] seconds based
        on the cache file's mtime.  Returns None (implicit) when the wiki
        page does not exist.
        """
        logging.info("Fetching wiki page...")
        if not os.path.exists(cfg["cache_file"]):
            logging.warning("No cache file found")
            modified = 0
        else:
            stat = os.stat(cfg["cache_file"])
            modified = int(stat.st_mtime)
        now = int(time.time())
        if modified > 0 and now - modified < cfg["cache_time"]:
            # Cache hit: serve the stored copy without touching reddit.
            cache = open(cfg["cache_file"], "r")
            logging.debug("Using valid cache")
            wiki_page = cache.read()
            cache.close()
            return wiki_page
        try:
            logging.debug("Updating cache")
            wiki_page = subreddit.get_wiki_page(cfg["wiki_page"]).content_md
        except (praw.errors.NotFound):
            logging.error("Wiki page %s doesn't exist", cfg["wiki_page"])
            return
        cache = open(cfg["cache_file"], "w")
        logging.debug("Writing cache")
        cache.write(wiki_page)
        cache.close()
        return wiki_page
    def run(self):
        """Process all new flair requests"""
        try:
            requests = self.get_requests()
        except (praw.errors.HTTPException):
            logging.error("OAuth access is invalid")
            return
        subreddit = self.r.get_subreddit(cfg["subreddit"])
        wiki_page = self.get_wiki_page(subreddit)
        if wiki_page is None:
            return
        self.flairs = parse_wiki_flairs(wiki_page)
        logging.debug(self.flairs)
        if requests is None:
            # get_requests() failed (returned None); note that a genuinely
            # empty request list simply skips the loop below silently.
            logging.info("No new messages to process")
            return
        for msg in requests:
            self.process_request(subreddit, msg)
# NOTE(review): setup_logging() runs at import time as a module side effect
# (handlers get attached even when this file is merely imported) — confirm
# this is intentional before moving it under the __main__ guard.
setup_logging()
if __name__ == "__main__":
    flair_bot = FlairBot()
    logging.info("Starting new run...")
    flair_bot.run()
    logging.info("Run complete! Processed %i requests.",
                 flair_bot.processed)
 | 
	mit | -8,983,526,375,439,880,000 | 28.455446 | 77 | 0.534118 | false | 
| 
	apark263/tensorflow | 
	tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column_test.py | 
	14 | 
	2330 | 
	# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_feature_column.py (deprecated).
This module and all its submodules are deprecated. To UPDATE or USE linear
optimizers, please check its latest version in core:
tensorflow_estimator/python/estimator/canned/linear_optimizer/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn
from tensorflow.python.framework import ops
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
class SparseFeatureColumnTest(TensorFlowTestCase):
  """Tests for SparseFeatureColumn.
  """
  def testBasic(self):
    # Without feature values: the index lists become Tensors and the
    # values attribute stays None.
    expected_example_indices = [1, 1, 1, 2]
    expected_feature_indices = [0, 1, 2, 0]
    sfc = SparseFeatureColumn(expected_example_indices,
                              expected_feature_indices, None)
    self.assertTrue(isinstance(sfc.example_indices, ops.Tensor))
    self.assertTrue(isinstance(sfc.feature_indices, ops.Tensor))
    self.assertEqual(sfc.feature_values, None)
    with self.cached_session():
      self.assertAllEqual(expected_example_indices, sfc.example_indices.eval())
      self.assertAllEqual(expected_feature_indices, sfc.feature_indices.eval())
    # With explicit feature values, they round-trip through the column.
    expected_feature_values = [1.0, 2.0, 3.0, 4.0]
    sfc = SparseFeatureColumn([1, 1, 1, 2], [0, 1, 2, 0],
                              expected_feature_values)
    with self.cached_session():
      self.assertAllEqual(expected_feature_values, sfc.feature_values.eval())
if __name__ == '__main__':
  googletest.main()
 | 
	apache-2.0 | -5,754,288,891,042,353,000 | 41.363636 | 100 | 0.709013 | false | 
| 
	chubbymaggie/angr | 
	tests/test_rol.py | 
	5 | 
	1328 | 
	import nose
import angr
from angr.calling_conventions import SimCCSystemVAMD64
import logging
l = logging.getLogger("angr.tests.test_rol")
import os
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
def test_rol_x86_64():
    """Symbolically execute the x86_64 ROL binary and recover the rax
    value that reaches the success branch at 0x401013."""
    proj = angr.Project(test_location + "/x86_64/test_rol.exe")
    state = proj.factory.blank_state(addr=0x401000)
    sym_rax = state.se.BVS('rax', 64)
    state.regs.rax = sym_rax
    simgr = proj.factory.simgr(state, immutable=False)
    simgr.explore(find=0x401013, avoid=0x401010)
    solution = simgr.found[0].se.eval(sym_rax)
    nose.tools.assert_equal(solution, 0x37B7AB70)
def test_rol_i386():
    """Symbolically execute the i386 ROL binary and recover the eax
    value that reaches the success branch at 0x401013."""
    proj = angr.Project(test_location + "/i386/test_rol.exe")
    state = proj.factory.blank_state(addr=0x401000)
    sym_eax = state.se.BVS('eax', 32)
    state.regs.eax = sym_eax
    simgr = proj.factory.simgr(state, immutable=False)
    simgr.explore(find=0x401013, avoid=0x401010)
    solution = simgr.found[0].se.eval(sym_eax)
    nose.tools.assert_equal(solution, 0x37B7AB70)
def test_all():
    """Run every architecture variant of the ROL test."""
    for case in (test_rol_x86_64, test_rol_i386):
        case()
if __name__ == "__main__":
    test_all()
 | 
	bsd-2-clause | -8,403,633,985,396,608,000 | 26.102041 | 102 | 0.676205 | false | 
| 
	dllsf/odootest | 
	addons/auth_signup/controllers/main.py | 
	165 | 
	6011 | 
	# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
import werkzeug
import openerp
from openerp.addons.auth_signup.res_users import SignupError
from openerp.addons.web.controllers.main import ensure_db
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class AuthSignupHome(openerp.addons.web.controllers.main.Home):
    # Extends the stock web Home controller with signup and password-reset
    # routes.  Python 2 exception syntax (``except X, e``) is used throughout.
    @http.route()
    def web_login(self, *args, **kw):
        ensure_db()
        response = super(AuthSignupHome, self).web_login(*args, **kw)
        # Expose the signup/reset feature flags to the login template.
        response.qcontext.update(self.get_auth_signup_config())
        if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
            # Redirect if already logged in and redirect param is present
            return http.redirect_with_hash(request.params.get('redirect'))
        return response
    @http.route('/web/signup', type='http', auth='public', website=True)
    def web_auth_signup(self, *args, **kw):
        # Render the signup form, or create the account on POST.
        qcontext = self.get_auth_signup_qcontext()
        # Without an invitation token, uninvited signup must be enabled.
        if not qcontext.get('token') and not qcontext.get('signup_enabled'):
            raise werkzeug.exceptions.NotFound()
        if 'error' not in qcontext and request.httprequest.method == 'POST':
            try:
                self.do_signup(qcontext)
                # Account created: fall through to the normal login flow.
                return super(AuthSignupHome, self).web_login(*args, **kw)
            except (SignupError, AssertionError), e:
                qcontext['error'] = _(e.message)
        return request.render('auth_signup.signup', qcontext)
    @http.route('/web/reset_password', type='http', auth='public', website=True)
    def web_auth_reset_password(self, *args, **kw):
        # Two-phase reset: without a token, email a reset link; with a
        # token, set the new password via the signup machinery.
        qcontext = self.get_auth_signup_qcontext()
        if not qcontext.get('token') and not qcontext.get('reset_password_enabled'):
            raise werkzeug.exceptions.NotFound()
        if 'error' not in qcontext and request.httprequest.method == 'POST':
            try:
                if qcontext.get('token'):
                    self.do_signup(qcontext)
                    return super(AuthSignupHome, self).web_login(*args, **kw)
                else:
                    login = qcontext.get('login')
                    assert login, "No login provided."
                    res_users = request.registry.get('res.users')
                    res_users.reset_password(request.cr, openerp.SUPERUSER_ID, login)
                    qcontext['message'] = _("An email has been sent with credentials to reset your password")
            except SignupError:
                qcontext['error'] = _("Could not reset your password")
                _logger.exception('error when resetting password')
            except Exception, e:
                qcontext['error'] = _(e.message)
        return request.render('auth_signup.reset_password', qcontext)
    def get_auth_signup_config(self):
        """retrieve the module config (which features are enabled) for the login page"""
        icp = request.registry.get('ir.config_parameter')
        return {
            'signup_enabled': icp.get_param(request.cr, openerp.SUPERUSER_ID, 'auth_signup.allow_uninvited') == 'True',
            'reset_password_enabled': icp.get_param(request.cr, openerp.SUPERUSER_ID, 'auth_signup.reset_password') == 'True',
        }
    def get_auth_signup_qcontext(self):
        """ Shared helper returning the rendering context for signup and reset password """
        qcontext = request.params.copy()
        qcontext.update(self.get_auth_signup_config())
        if qcontext.get('token'):
            try:
                # retrieve the user info (name, login or email) corresponding to a signup token
                res_partner = request.registry.get('res.partner')
                token_infos = res_partner.signup_retrieve_info(request.cr, openerp.SUPERUSER_ID, qcontext.get('token'))
                for k, v in token_infos.items():
                    # setdefault: explicit request params win over token data.
                    qcontext.setdefault(k, v)
            except:
                qcontext['error'] = _("Invalid signup token")
        return qcontext
    def do_signup(self, qcontext):
        """ Shared helper that creates a res.partner out of a token """
        values = dict((key, qcontext.get(key)) for key in ('login', 'name', 'password'))
        assert any([k for k in values.values()]), "The form was not properly filled in."
        assert values.get('password') == qcontext.get('confirm_password'), "Passwords do not match; please retype them."
        self._signup_with_values(qcontext.get('token'), values)
        request.cr.commit()
    def _signup_with_values(self, token, values):
        db, login, password = request.registry['res.users'].signup(request.cr, openerp.SUPERUSER_ID, values, token)
        request.cr.commit()     # as authenticate will use its own cursor we need to commit the current transaction
        uid = request.session.authenticate(db, login, password)
        if not uid:
            raise SignupError(_('Authentification Failed.'))
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
 | 
	agpl-3.0 | -4,256,783,637,298,582,000 | 46.330709 | 126 | 0.622359 | false | 
| 
	pkexcellent/luigi | 
	examples/elasticsearch_index.py | 
	57 | 
	3399 | 
	# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import json
import luigi
from luigi.contrib.esindex import CopyToIndex
class FakeDocuments(luigi.Task):
    """Produce a small newline-delimited JSON file of sample documents."""
    #: the date parameter; makes each day's output a distinct target.
    date = luigi.DateParameter(default=datetime.date.today())

    def run(self):
        """Write five JSON records, one per line, to the output target.

        Each record carries `_id` (the default Elasticsearch id field),
        `text`, and the creation `date`.
        """
        stamp = str(datetime.date.today())
        with self.output().open('w') as sink:
            for doc_id in range(5):
                record = {'_id': doc_id, 'text': 'Hi %s' % doc_id,
                          'date': stamp}
                sink.write(json.dumps(record))
                sink.write('\n')

    def output(self):
        """Return the local-filesystem target holding the generated docs.

        :return: the target output for this task.
        :rtype: object (:py:class:`luigi.target.Target`)
        """
        return luigi.LocalTarget(path='/tmp/_docs-%s.ldj' % self.date)
class IndexDocuments(CopyToIndex):
    """
    This task loads JSON data contained in a :py:class:`luigi.target.Target` into an ElasticSearch index.
    This task's input will the target returned by :py:meth:`~.FakeDocuments.output`.
    This class uses :py:meth:`luigi.contrib.esindex.CopyToIndex.run`.
    After running this task you can run:
    .. code-block:: console
        $ curl "localhost:9200/example_index/_search?pretty"
    to see the indexed documents.
    To see the update log, run
    .. code-block:: console
        $ curl "localhost:9200/update_log/_search?q=target_index:example_index&pretty"
    To cleanup both indexes run:
    .. code-block:: console
        $ curl -XDELETE "localhost:9200/example_index"
        $ curl -XDELETE "localhost:9200/update_log/_query?q=target_index:example_index"
    """
    #: date task parameter (default = today)
    date = luigi.DateParameter(default=datetime.date.today())
    #: the name of the index in ElasticSearch to be updated.
    index = 'example_index'
    #: the name of the document type.
    doc_type = 'greetings'
    #: the host running the ElasticSearch service.
    host = 'localhost'
    #: the port used by the ElasticSearch service.
    port = 9200
    def requires(self):
        """
        This task's dependencies:
        * :py:class:`~.FakeDocuments`
        :return: object (:py:class:`luigi.task.Task`)
        """
        return FakeDocuments()
# Entry point: `python elasticsearch_index.py` runs the IndexDocuments task
# (which first schedules its FakeDocuments dependency).
if __name__ == "__main__":
    luigi.run(['--task', 'IndexDocuments'])
 | 
	apache-2.0 | -6,298,217,546,386,609,000 | 28.556522 | 105 | 0.639894 | false | 
| 
	Bismarrck/tensorflow | 
	tensorflow/tools/dist_test/scripts_allreduce/k8s_generate_yaml.py | 
	11 | 
	2997 | 
	#!/usr/bin/python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates YAML configuration file for allreduce-based distributed TensorFlow.
The workers will be run in a Kubernetes (k8s) container cluster.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import k8s_generate_yaml_lib
# Note: It is intentional that we do not import tensorflow in this script. The
# machine that launches a TensorFlow k8s cluster does not have to have the
# Python package of TensorFlow installed on it.
# Defaults used when the corresponding command-line flag is not supplied.
DEFAULT_DOCKER_IMAGE = 'tensorflow/tensorflow:latest-devel'
DEFAULT_PORT = 22
DEFAULT_CONFIG_MAP = 'k8s-config-map'
DEFAULT_DEPLOYMENT = 'k8s-ml-deployment'
def main():
  """Parse command-line flags and print the generated k8s YAML config.

  Exits with status 1 when --num_containers is not a positive integer.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--docker_image',
      type=str,
      default=DEFAULT_DOCKER_IMAGE,
      help='Override default docker image for the TensorFlow')
  parser.add_argument(
      '--num_containers',
      type=int,
      default=0,
      help='How many docker containers to launch')
  parser.add_argument(
      '--config_map',
      type=str,
      default=DEFAULT_CONFIG_MAP,
      help='Override default config map')
  parser.add_argument(
      '--deployment',
      type=str,
      default=DEFAULT_DEPLOYMENT,
      help='Override default deployment')
  parser.add_argument(
      '--ssh_port',
      type=int,
      default=DEFAULT_PORT,
      help='Override default ssh port (Default: %d)' % DEFAULT_PORT)
  parser.add_argument(
      '--use_hostnet',
      type=int,
      default=0,
      help='Used to enable host network mode (Default: 0)')
  parser.add_argument(
      '--use_shared_volume',
      type=int,
      default=0,
      help='Used to mount shared volume (Default: 0)')
  args = parser.parse_args()
  # --num_containers has no safe default, so validate it explicitly.
  if args.num_containers <= 0:
    sys.stderr.write('--num_containers must be greater than 0; received %d\n' %
                     args.num_containers)
    sys.exit(1)
  # Generate contents of yaml config
  yaml_config = k8s_generate_yaml_lib.GenerateConfig(
      args.docker_image, args.num_containers, args.config_map, args.deployment,
      args.ssh_port, args.use_hostnet, args.use_shared_volume)
  print(yaml_config)  # pylint: disable=superfluous-parens
if __name__ == '__main__':
  main()
 | 
	apache-2.0 | -8,413,327,926,837,628,000 | 31.225806 | 80 | 0.674007 | false | 
| 
	therealfakemoot/collections2 | 
	collections2/dicts.py | 
	2 | 
	2578 | 
	from collections import MutableMapping
class OrderedDict(MutableMapping):
    '''OrderedDict is a mapping object that allows for ordered access
    and insertion of keys. With the exception of the key_index, insert, and
    reorder_keys methods behavior is identical to stock dictionary objects.'''

    def __init__(self, items=None):
        '''OrderedDict accepts an optional iterable of two-tuples
        indicating keys and values.'''
        self._d = dict()   # key -> value storage
        self._keys = []    # keys in insertion (or explicitly reordered) order
        if items is None:
            return
        for key, value in items:
            self[key] = value

    def __len__(self):
        return len(self._d)

    def __iter__(self):
        # Iterate keys in their recorded order rather than dict order.
        for key in self._keys:
            yield key

    def __setitem__(self, key, value):
        # A brand-new key is appended to the order; an existing key keeps
        # its current position and only its value is replaced.
        if key not in self._keys:
            self._keys.append(key)
        self._d[key] = value

    def __getitem__(self, key):
        return self._d[key]

    def __delitem__(self, key):
        self._keys.remove(key)
        del self._d[key]

    def key_index(self, key):
        '''Accepts a parameter, :key:, and returns an integer value
        representing its index in the ordered list of keys.

        Raises ValueError if the key is not present.'''
        return self._keys.index(key)

    def insert(self, key, value, index):
        '''Accepts a :key:, :value:, and :index: parameter and inserts
        a new key, value member at the desired index.

        If the key already exists it is moved to the requested index and
        its value replaced.

        Note: Inserting with a negative index will have the following behavior:
        >>> l = [1, 2, 3, 4]
        >>> l.insert(-1, 5)
        >>> l
        [1, 2, 3, 5, 4]
        '''
        if key in self._keys:
            self._keys.remove(key)
        self._keys.insert(index, key)
        self._d[key] = value

    def reorder_keys(self, keys):
        '''Accepts a :keys: parameter, an iterable of keys in the
        desired new order. The :keys: parameter must contain all
        existing keys.

        Raises ValueError when the supplied keys do not exactly match
        the current key set.'''
        # Copy into a fresh list: accepts any iterable (not just lists) and
        # avoids aliasing the caller's sequence, which would let external
        # mutations corrupt our key order.
        keys = list(keys)
        if len(keys) != len(self._keys):
            raise ValueError('The supplied number of keys does not match.')
        if set(keys) != set(self._d.keys()):
            raise ValueError('The supplied keys do not match the current set of keys.')
        self._keys = keys

    def __repr__(self):
        return str([(key, self[key]) for key in self])

    def __eq__(self, other):
        # NOTE: MutableMapping.items() returns a set-like view, so this
        # comparison is order-insensitive; preserved for backward
        # compatibility with existing callers.
        if not isinstance(other, OrderedDict):
            return False
        return self.items() == other.items()

    def keys(self):
        """Return a copy of the _keys list instead of iterating over it as the MutableMapping does by default.
        """
        return list(self._keys)
 | 
	mit | -4,717,889,050,585,243,000 | 30.439024 | 110 | 0.576804 | false | 
			Subsets and Splits
				
	
				
			
				
Gradio Code Samples
												Limits the results to entries containing the word 'gradio' in the repo_name, content, or path, providing a filtered subset of the dataset.
													
