# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import sys
import os
import signal
import platform
from PyQt5.QtCore import Qt, QObject, QCoreApplication, QEvent, pyqtSlot, QLocale, QTranslator, QLibraryInfo, QT_VERSION_STR, PYQT_VERSION_STR
from PyQt5.QtQml import QQmlApplicationEngine, qmlRegisterType, qmlRegisterSingletonType
from PyQt5.QtWidgets import QApplication, QSplashScreen
from PyQt5.QtGui import QGuiApplication, QPixmap
from PyQt5.QtCore import QTimer
from UM.Application import Application
from UM.Qt.QtRenderer import QtRenderer
from UM.Qt.Bindings.Bindings import Bindings
from UM.Signal import Signal, signalemitter
from UM.Resources import Resources
from UM.Logger import Logger
from UM.Preferences import Preferences
from UM.i18n import i18nCatalog
import UM.Settings.InstanceContainer #For version upgrade to know the version number.
import UM.Settings.ContainerStack #For version upgrade to know the version number.
import UM.Preferences #For version upgrade to know the version number.
# Raised when we try to use an unsupported version of a dependency.
class UnsupportedVersionError(Exception):
pass
# Check the PyQt version; we only support 5.4 or higher.
major, minor = PYQT_VERSION_STR.split(".")[0:2]
if (int(major), int(minor)) < (5, 4):
raise UnsupportedVersionError("This application requires at least PyQt 5.4.0")
## Application subclass that provides a Qt application object.
@signalemitter
class QtApplication(QApplication, Application):
def __init__(self, **kwargs):
plugin_path = ""
if sys.platform == "win32":
if hasattr(sys, "frozen"):
plugin_path = os.path.join(os.path.dirname(os.path.abspath(sys.executable)), "PyQt5", "plugins")
Logger.log("i", "Adding QT5 plugin path: %s" % (plugin_path))
QCoreApplication.addLibraryPath(plugin_path)
else:
import site
for dir in site.getsitepackages():
QCoreApplication.addLibraryPath(os.path.join(dir, "PyQt5", "plugins"))
elif sys.platform == "darwin":
plugin_path = os.path.join(Application.getInstallPrefix(), "Resources", "plugins")
if plugin_path:
Logger.log("i", "Adding QT5 plugin path: %s" % (plugin_path))
QCoreApplication.addLibraryPath(plugin_path)
os.environ["QSG_RENDER_LOOP"] = "basic"
super().__init__(sys.argv, **kwargs)
self._plugins_loaded = False #Used to determine when it's safe to use the plug-ins.
self._main_qml = "main.qml"
self._engine = None
self._renderer = None
self._main_window = None
self._shutting_down = False
self._qml_import_paths = []
self._qml_import_paths.append(os.path.join(os.path.dirname(sys.executable), "qml"))
self._qml_import_paths.append(os.path.join(Application.getInstallPrefix(), "Resources", "qml"))
self.setAttribute(Qt.AA_UseDesktopOpenGL)
try:
self._splash = self._createSplashScreen()
except FileNotFoundError:
self._splash = None
else:
self._splash.show()
self.processEvents()
signal.signal(signal.SIGINT, signal.SIG_DFL)
# This is done here as a lot of plugins require a correct gl context. If you want to change the framework,
# these checks need to be done in your <framework>Application.py class __init__().
i18n_catalog = i18nCatalog("uranium")
self.showSplashMessage(i18n_catalog.i18nc("@info:progress", "Loading plugins..."))
self._loadPlugins()
self.parseCommandLine()
Logger.log("i", "Command line arguments: %s", self._parsed_command_line)
self._plugin_registry.checkRequiredPlugins(self.getRequiredPlugins())
self.showSplashMessage(i18n_catalog.i18nc("@info:progress", "Updating configuration..."))
upgraded = UM.VersionUpgradeManager.VersionUpgradeManager.getInstance().upgrade()
if upgraded:
preferences = UM.Preferences.getInstance() #Preferences might have changed. Load them again.
#Note that the language can't be updated, so that will always revert to English.
try:
preferences.readFromFile(Resources.getPath(Resources.Preferences, self._application_name + ".cfg"))
except FileNotFoundError:
pass
self.showSplashMessage(i18n_catalog.i18nc("@info:progress", "Loading preferences..."))
try:
file = Resources.getPath(Resources.Preferences, self.getApplicationName() + ".cfg")
Preferences.getInstance().readFromFile(file)
except FileNotFoundError:
pass
def run(self):
pass
def hideMessage(self, message):
with self._message_lock:
if message in self._visible_messages:
self._visible_messages.remove(message)
self.visibleMessageRemoved.emit(message)
def showMessage(self, message):
with self._message_lock:
if message not in self._visible_messages:
self._visible_messages.append(message)
message.setTimer(QTimer())
self.visibleMessageAdded.emit(message)
def setMainQml(self, path):
self._main_qml = path
def initializeEngine(self):
# TODO: Document native/qml import trickery
Bindings.register()
self._engine = QQmlApplicationEngine()
for path in self._qml_import_paths:
self._engine.addImportPath(path)
if not hasattr(sys, "frozen"):
self._engine.addImportPath(os.path.join(os.path.dirname(__file__), "qml"))
self._engine.rootContext().setContextProperty("QT_VERSION_STR", QT_VERSION_STR)
self._engine.rootContext().setContextProperty("screenScaleFactor", self._screenScaleFactor())
self.registerObjects(self._engine)
self._engine.load(self._main_qml)
self.engineCreatedSignal.emit()
engineCreatedSignal = Signal()
def isShuttingDown(self):
return self._shutting_down
def registerObjects(self, engine):
pass
def getRenderer(self):
if not self._renderer:
self._renderer = QtRenderer()
return self._renderer
def addCommandLineOptions(self, parser):
parser.add_argument("--disable-textures",
dest="disable-textures",
action="store_true", default=False,
help="Disable Qt texture loading as a workaround for certain crashes.")
# Overridden from QApplication::setApplicationName to call our internal setApplicationName
def setApplicationName(self, name):
Application.setApplicationName(self, name)
mainWindowChanged = Signal()
def getMainWindow(self):
return self._main_window
def setMainWindow(self, window):
if window != self._main_window:
self._main_window = window
self.mainWindowChanged.emit()
# Handle a function that should be called later.
def functionEvent(self, event):
e = _QtFunctionEvent(event)
QCoreApplication.postEvent(self, e)
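# Illustrative usage, assuming an event wrapper that holds a callable plus its
# arguments (a CallFunctionEvent-like class from UM.Event; the names below are
# illustrative, not taken from this snippet):
#   self.functionEvent(CallFunctionEvent(my_callback, args, kwargs))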
# Handle Qt events
def event(self, event):
if event.type() == _QtFunctionEvent.QtFunctionEvent:
event._function_event.call()
return True
return super().event(event)
def windowClosed(self):
Logger.log("d", "Shutting down %s", self.getApplicationName())
self._shutting_down = True
try:
Preferences.getInstance().writeToFile(Resources.getStoragePath(Resources.Preferences, self.getApplicationName() + ".cfg"))
except Exception as e:
Logger.log("e", "Exception while saving preferences: %s", repr(e))
try:
self.applicationShuttingDown.emit()
except Exception as e:
Logger.log("e", "Exception while emitting shutdown signal: %s", repr(e))
try:
self.getBackend().close()
except Exception as e:
Logger.log("e", "Exception while closing backend: %s", repr(e))
self.quit()
## Load a Qt translation catalog.
#
# This method will locate, load and install a Qt message catalog that can be used
# by Qt's translation system, like qsTr() in QML files.
#
# \param file The file name to load, without extension. It will be searched for in
# the i18nLocation Resources directory. If it can not be found a warning
# will be logged but no error will be thrown.
# \param language The language to load translations for. This can be any valid language code
# or 'default' in which case the language is looked up based on system locale.
# If the specified language can not be found, this method will fall back to
# loading the english translations file.
#
# \note When `language` is `default`, the language to load can be changed with the
# environment variable "LANGUAGE".
def loadQtTranslation(self, file, language = "default"):
#TODO Add support for specifying a language from preferences
path = None
if language == "default":
path = self._getDefaultLanguage(file)
else:
path = Resources.getPath(Resources.i18n, language, "LC_MESSAGES", file + ".qm")
# If all else fails, fall back to english.
if not path:
Logger.log("w", "Could not find any translations matching {0} for file {1}, falling back to english".format(language, file))
try:
path = Resources.getPath(Resources.i18n, "en", "LC_MESSAGES", file + ".qm")
except FileNotFoundError:
Logger.log("w", "Could not find English translations for file {0}. Switching to developer english.".format(file))
return
translator = QTranslator()
if not translator.load(path):
Logger.log("e", "Unable to load translations %s", file)
return
# Store a reference to the translator.
# This prevents the translator from being destroyed before Qt has a chance to use it.
self._translators[file] = translator
# Finally, install the translator so Qt can use it.
self.installTranslator(translator)
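# Example usage (illustrative; the file argument is the base name of a .qm
# catalog under the i18n resources, "uranium" is used here as a placeholder):
#   self.loadQtTranslation("uranium")          # pick the language from the system locale
#   self.loadQtTranslation("uranium", "nl_NL") # force a specific language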
## Display text on the splash screen.
def showSplashMessage(self, message):
if self._splash:
self._splash.showMessage(message, Qt.AlignHCenter | Qt.AlignVCenter)
self.processEvents()
## Close the splash screen after the application has started.
def closeSplash(self):
if self._splash:
self._splash.close()
self._splash = None
def _createSplashScreen(self):
return QSplashScreen(QPixmap(Resources.getPath(Resources.Images, self.getApplicationName() + ".png")))
def _screenScaleFactor(self):
physical_dpi = QGuiApplication.primaryScreen().physicalDotsPerInch()
# Typically 'normal' screens have a DPI around 96. Modern high DPI screens are up around 220.
# We scale the low DPI screens with a traditional 1, and double the high DPI ones.
return 1.0 if physical_dpi < 150 else 2.0
def _getDefaultLanguage(self, file):
# If we have a language override set in the environment, try and use that.
lang = os.getenv("URANIUM_LANGUAGE")
if lang:
try:
return Resources.getPath(Resources.i18n, lang, "LC_MESSAGES", file + ".qm")
except FileNotFoundError:
pass
# Else, try and get the current language from preferences
lang = Preferences.getInstance().getValue("general/language")
if lang:
try:
return Resources.getPath(Resources.i18n, lang, "LC_MESSAGES", file + ".qm")
except FileNotFoundError:
pass
# If none of those are set, try to use the environment's LANGUAGE variable.
lang = os.getenv("LANGUAGE")
if lang:
try:
return Resources.getPath(Resources.i18n, lang, "LC_MESSAGES", file + ".qm")
except FileNotFoundError:
pass
# If looking up the language from the environment or preferences fails, try and use Qt's system locale instead.
locale = QLocale.system()
# First, try and find a directory for any of the provided languages
for lang in locale.uiLanguages():
try:
return Resources.getPath(Resources.i18n, lang, "LC_MESSAGES", file + ".qm")
except FileNotFoundError:
pass
# If that fails, see if we can extract a language "class" from the
# preferred language. This will turn "en-GB" into "en" for example.
lang = locale.uiLanguages()[0]
lang = lang[0:lang.find("-")]
try:
return Resources.getPath(Resources.i18n, lang, "LC_MESSAGES", file + ".qm")
except FileNotFoundError:
pass
return None
## Internal.
#
# Wrapper around a FunctionEvent object to make Qt handle the event properly.
class _QtFunctionEvent(QEvent):
QtFunctionEvent = QEvent.User + 1
def __init__(self, fevent):
super().__init__(self.QtFunctionEvent)
self._function_event = fevent
# ==== source: onitake/Uranium | path: UM/Qt/QtApplication.py | language: Python | license: agpl-3.0 | size: 13,585 ====
"""Test Workbench Runtime"""
from unittest import TestCase
import mock
from django.conf import settings
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from xblock.runtime import KvsFieldData
from xblock.reference.user_service import UserService
from ..runtime import WorkbenchRuntime, ScenarioIdManager, WorkbenchDjangoKeyValueStore
class TestScenarioIds(TestCase):
"""
Test XBlock Scenario IDs
"""
def setUp(self):
# Test basic ID generation meets our expectations
self.id_mgr = ScenarioIdManager()
def test_no_scenario_loaded(self):
self.assertEqual(self.id_mgr.create_definition("my_block"), ".my_block.d0")
def test_should_increment(self):
self.assertEqual(self.id_mgr.create_definition("my_block"), ".my_block.d0")
self.assertEqual(self.id_mgr.create_definition("my_block"), ".my_block.d1")
def test_slug_support(self):
self.assertEqual(
self.id_mgr.create_definition("my_block", "my_slug"),
".my_block.my_slug.d0"
)
self.assertEqual(
self.id_mgr.create_definition("my_block", "my_slug"),
".my_block.my_slug.d1"
)
def test_scenario_support(self):
self.test_should_increment()
# Now that we have a scenario, our definition numbering starts over again.
self.id_mgr.set_scenario("my_scenario")
self.assertEqual(self.id_mgr.create_definition("my_block"), "my_scenario.my_block.d0")
self.assertEqual(self.id_mgr.create_definition("my_block"), "my_scenario.my_block.d1")
self.id_mgr.set_scenario("another_scenario")
self.assertEqual(self.id_mgr.create_definition("my_block"), "another_scenario.my_block.d0")
def test_usages(self):
# Now make sure our usages are attached to definitions
self.assertIsNone(self.id_mgr.last_created_usage_id())
self.assertEqual(
self.id_mgr.create_usage("my_scenario.my_block.d0"),
"my_scenario.my_block.d0.u0"
)
self.assertEqual(
self.id_mgr.create_usage("my_scenario.my_block.d0"),
"my_scenario.my_block.d0.u1"
)
self.assertEqual(self.id_mgr.last_created_usage_id(), "my_scenario.my_block.d0.u1")
def test_asides(self):
definition_id = self.id_mgr.create_definition('my_block')
usage_id = self.id_mgr.create_usage(definition_id)
aside_definition, aside_usage = self.id_mgr.create_aside(definition_id, usage_id, 'my_aside')
self.assertEqual(self.id_mgr.get_aside_type_from_definition(aside_definition), 'my_aside')
self.assertEqual(self.id_mgr.get_definition_id_from_aside(aside_definition), definition_id)
self.assertEqual(self.id_mgr.get_aside_type_from_usage(aside_usage), 'my_aside')
self.assertEqual(self.id_mgr.get_usage_id_from_aside(aside_usage), usage_id)
class TestKVStore(TestCase):
"""
Test the Workbench KVP Store
"""
def setUp(self):
self.kvs = WorkbenchDjangoKeyValueStore()
self.key = KeyValueStore.Key(
scope=Scope.content,
user_id="rusty",
block_scope_id="my_scenario.my_block.d0",
field_name="age"
)
def test_storage(self):
self.assertFalse(self.kvs.has(self.key))
self.kvs.set(self.key, 7)
self.assertTrue(self.kvs.has(self.key))
self.assertEqual(self.kvs.get(self.key), 7)
self.kvs.delete(self.key)
self.assertFalse(self.kvs.has(self.key))
class StubService(object):
"""Empty service to test loading additional services. """
pass
class ExceptionService(object):
"""Stub service that raises an exception on init. """
def __init__(self):
raise Exception("Kaboom!")
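# The workbench runtime loads extra services from settings.WORKBENCH['services'],
# a mapping of service name to dotted class path. A minimal sketch of that
# setting (shape inferred from the patches below; names illustrative):
#
#   WORKBENCH = {'services': {'stub': 'workbench.test.test_runtime.StubService'}}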
class TestServices(TestCase):
"""
Test XBlock runtime services
"""
def setUp(self):
super(TestServices, self).setUp()
self.xblock = mock.Mock()
def test_default_services(self):
runtime = WorkbenchRuntime('test_user')
self._assert_default_services(runtime)
@mock.patch.dict(settings.WORKBENCH['services'], {
'stub': 'workbench.test.test_runtime.StubService'
})
def test_settings_adds_services(self):
runtime = WorkbenchRuntime('test_user')
# Default services should still be available
self._assert_default_services(runtime)
# An additional service should be provided
self._assert_service(runtime, 'stub', StubService)
# Check that the service has the runtime attribute set
service = runtime.service(self.xblock, 'stub')
self.assertIs(service.runtime, runtime)
@mock.patch.dict(settings.WORKBENCH['services'], {
'not_found': 'workbench.test.test_runtime.NotFoundService'
})
def test_could_not_find_service(self):
runtime = WorkbenchRuntime('test_user')
# Default services should still be available
self._assert_default_services(runtime)
# The additional service should NOT be available
self.assertIs(runtime.service(self.xblock, 'not_found'), None)
@mock.patch.dict(settings.WORKBENCH['services'], {
'exception': 'workbench.test.test_runtime.ExceptionService'
})
def test_runtime_service_initialization_failed(self):
runtime = WorkbenchRuntime('test_user')
# Default services should still be available
self._assert_default_services(runtime)
# The additional service should NOT be available
self.assertIs(runtime.service(self.xblock, 'exception'), None)
def _assert_default_services(self, runtime):
"""Check that the default services are available. """
self._assert_service(runtime, 'field-data', KvsFieldData)
self._assert_service(runtime, 'user', UserService)
def _assert_service(self, runtime, service_name, service_class):
"""Check that a service is loaded. """
service_instance = runtime.service(self.xblock, service_name)
self.assertIsInstance(service_instance, service_class)
# ==== source: Lyla-Fischer/xblock-sdk | path: workbench/test/test_runtime.py | language: Python | license: agpl-3.0 | size: 6,112 ====
"""
Asset compilation and collection.
"""
from __future__ import print_function
import argparse
from paver.easy import sh, path, task, cmdopts, needs, consume_args, call_task
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import glob
import traceback
from .utils.envs import Env
from .utils.cmd import cmd, django_cmd
COFFEE_DIRS = ['lms', 'cms', 'common']
SASS_LOAD_PATHS = ['./common/static/sass']
SASS_UPDATE_DIRS = ['*/static']
SASS_CACHE_PATH = '/tmp/sass-cache'
class CoffeeScriptWatcher(PatternMatchingEventHandler):
"""
Watches for coffeescript changes
"""
ignore_directories = True
patterns = ['*.coffee']
def register(self, observer):
"""
register files with observer
"""
dirnames = set()
for filename in sh(coffeescript_files(), capture=True).splitlines():
dirnames.add(path(filename).dirname())
for dirname in dirnames:
observer.schedule(self, dirname)
def on_modified(self, event):
print('\tCHANGED:', event.src_path)
try:
compile_coffeescript(event.src_path)
except Exception: # pylint: disable=W0703
traceback.print_exc()
class SassWatcher(PatternMatchingEventHandler):
"""
Watches for sass file changes
"""
ignore_directories = True
patterns = ['*.scss']
ignore_patterns = ['common/static/xmodule/*']
def register(self, observer):
"""
register files with observer
"""
for dirname in SASS_LOAD_PATHS + SASS_UPDATE_DIRS + theme_sass_paths():
paths = []
if '*' in dirname:
paths.extend(glob.glob(dirname))
else:
paths.append(dirname)
for dirname in paths:
observer.schedule(self, dirname, recursive=True)
def on_modified(self, event):
print('\tCHANGED:', event.src_path)
try:
compile_sass()
except Exception: # pylint: disable=W0703
traceback.print_exc()
class XModuleSassWatcher(SassWatcher):
"""
Watches for sass file changes
"""
ignore_directories = True
ignore_patterns = []
def register(self, observer):
"""
register files with observer
"""
observer.schedule(self, 'common/lib/xmodule/', recursive=True)
def on_modified(self, event):
print('\tCHANGED:', event.src_path)
try:
process_xmodule_assets()
except Exception: # pylint: disable=W0703
traceback.print_exc()
def theme_sass_paths():
"""
Return a list of paths to the theme's sass assets,
or an empty list if no theme is configured.
"""
edxapp_env = Env()
if edxapp_env.feature_flags.get('USE_CUSTOM_THEME', False):
theme_name = edxapp_env.env_tokens.get('THEME_NAME', '')
parent_dir = path(edxapp_env.REPO_ROOT).abspath().parent
theme_root = parent_dir / "themes" / theme_name
return [theme_root / "static" / "sass"]
else:
return []
def coffeescript_files():
"""
return find command for paths containing coffee files
"""
dirs = " ".join([Env.REPO_ROOT / coffee_dir for coffee_dir in COFFEE_DIRS])
return cmd('find', dirs, '-type f', '-name \"*.coffee\"')
def compile_coffeescript(*files):
"""
Compile CoffeeScript to JavaScript.
"""
if not files:
files = ["`{}`".format(coffeescript_files())]
sh(cmd(
"node_modules/.bin/coffee", "--compile", *files
))
def compile_sass(debug=False):
"""
Compile Sass to CSS.
"""
theme_paths = theme_sass_paths()
sh(cmd(
'sass', '' if debug else '--style compressed',
"--cache-location {cache}".format(cache=SASS_CACHE_PATH),
"--load-path", " ".join(SASS_LOAD_PATHS + theme_paths),
"--update", "-E", "utf-8", " ".join(SASS_UPDATE_DIRS + theme_paths)
))
def compile_templated_sass(systems, settings):
"""
Render Mako templates for Sass files.
`systems` is a list of systems (e.g. 'lms' or 'studio' or both)
`settings` is the Django settings module to use.
"""
for sys in systems:
sh(django_cmd(sys, settings, 'preprocess_assets'))
def process_xmodule_assets():
"""
Process XModule static assets.
"""
sh('xmodule_assets common/static/xmodule')
def collect_assets(systems, settings):
"""
Collect static assets, including Django pipeline processing.
`systems` is a list of systems (e.g. 'lms' or 'studio' or both)
`settings` is the Django settings module to use.
"""
for sys in systems:
sh(django_cmd(sys, settings, "collectstatic --noinput > /dev/null"))
@task
@cmdopts([('background', 'b', 'Background mode')])
def watch_assets(options):
"""
Watch for changes to asset files, and regenerate js/css
"""
observer = Observer()
CoffeeScriptWatcher().register(observer)
SassWatcher().register(observer)
XModuleSassWatcher().register(observer)
print("Starting asset watcher...")
observer.start()
if not getattr(options, 'background', False):
# when running as a separate process, the main thread needs to loop
# in order to allow for shutdown by ctrl-c
try:
while True:
observer.join(2)
except KeyboardInterrupt:
observer.stop()
print("\nStopped asset watcher.")
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
def update_assets(args):
"""
Compile CoffeeScript and Sass, then collect static assets.
"""
parser = argparse.ArgumentParser(prog='paver update_assets')
parser.add_argument(
'system', type=str, nargs='*', default=['lms', 'studio'],
help="lms or studio",
)
parser.add_argument(
'--settings', type=str, default="dev",
help="Django settings module",
)
parser.add_argument(
'--debug', action='store_true', default=False,
help="Disable Sass compression",
)
parser.add_argument(
'--skip-collect', dest='collect', action='store_false', default=True,
help="Skip collection of static assets",
)
parser.add_argument(
'--watch', action='store_true', default=False,
help="Watch files for changes",
)
args = parser.parse_args(args)
compile_templated_sass(args.system, args.settings)
process_xmodule_assets()
compile_coffeescript()
compile_sass(args.debug)
if args.collect:
collect_assets(args.system, args.settings)
if args.watch:
call_task('watch_assets', options={'background': not args.debug})
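# Example invocations from the repository root (illustrative):
#   paver update_assets lms --settings=dev
#   paver update_assets lms studio --settings=dev --debug --watch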
# ==== source: torchingloom/edx-platform | path: pavelib/assets.py | language: Python | license: agpl-3.0 | size: 6,729 ====
from . import orderpoint_generator
# ==== source: Vauxoo/stock-logistics-warehouse | path: stock_orderpoint_generator/wizard/__init__.py | language: Python | license: agpl-3.0 | size: 35 ====
# Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests of the oopsreferences core."""
__metaclass__ = type
from datetime import (
datetime,
timedelta,
)
from pytz import utc
from lp.registry.model.oopsreferences import referenced_oops
from lp.services.database.interfaces import IStore
from lp.services.messages.model.message import (
Message,
MessageSet,
)
from lp.testing import (
person_logged_in,
TestCaseWithFactory,
)
from lp.testing.layers import DatabaseFunctionalLayer
class TestOopsReferences(TestCaseWithFactory):
layer = DatabaseFunctionalLayer
def setUp(self):
super(TestOopsReferences, self).setUp()
self.store = IStore(Message)
def test_oops_in_messagechunk(self):
oopsid = "OOPS-abcdef1234"
MessageSet().fromText('foo', "foo %s bar" % oopsid)
self.store.flush()
now = datetime.now(tz=utc)
day = timedelta(days=1)
self.failUnlessEqual(
set([oopsid]),
referenced_oops(now - day, now, "product=1", {}))
self.failUnlessEqual(
set(),
referenced_oops(now + day, now + day, "product=1", {}))
def test_oops_in_messagesubject(self):
oopsid = "OOPS-abcdef1234"
self.factory.makeEmailMessage()
MessageSet().fromText("Crash with %s" % oopsid, "body")
self.store.flush()
now = datetime.now(tz=utc)
day = timedelta(days=1)
self.failUnlessEqual(
set([oopsid]),
referenced_oops(now - day, now, "product=1", {}))
self.failUnlessEqual(
set(),
referenced_oops(now + day, now + day, "product=1", {}))
def test_oops_in_bug_title(self):
oopsid = "OOPS-abcdef1234"
bug = self.factory.makeBug()
with person_logged_in(bug.owner):
bug.title = "Crash with %s" % oopsid
self.store.flush()
now = datetime.now(tz=utc)
day = timedelta(days=1)
self.failUnlessEqual(
set([oopsid]),
referenced_oops(now - day, now, "product=1", {}))
self.failUnlessEqual(
set(),
referenced_oops(now + day, now + day, "product=1", {}))
def test_oops_in_bug_description(self):
oopsid = "OOPS-abcdef1234"
bug = self.factory.makeBug()
with person_logged_in(bug.owner):
bug.description = "Crash with %s" % oopsid
self.store.flush()
now = datetime.now(tz=utc)
day = timedelta(days=1)
self.failUnlessEqual(
set([oopsid]),
referenced_oops(now - day, now, "product=1", {}))
self.failUnlessEqual(
set(),
referenced_oops(now + day, now + day, "product=1", {}))
def test_oops_in_question_title(self):
oopsid = "OOPS-abcdef1234"
question = self.factory.makeQuestion(title="Crash with %s" % oopsid)
self.store.flush()
now = datetime.now(tz=utc)
day = timedelta(days=1)
self.failUnlessEqual(
set([oopsid]),
referenced_oops(now - day, now, "product=%(product)s",
{'product': question.product.id}))
self.failUnlessEqual(
set([]),
referenced_oops(now + day, now + day, "product=%(product)s",
{'product': question.product.id}))
def test_oops_in_question_wrong_context(self):
oopsid = "OOPS-abcdef1234"
question = self.factory.makeQuestion(title="Crash with %s" % oopsid)
self.store.flush()
now = datetime.now(tz=utc)
day = timedelta(days=1)
self.store.flush()
self.failUnlessEqual(
set(),
referenced_oops(now - day, now, "product=%(product)s",
{'product': question.product.id + 1}))
def test_oops_in_question_description(self):
oopsid = "OOPS-abcdef1234"
question = self.factory.makeQuestion(
description="Crash with %s" % oopsid)
self.store.flush()
now = datetime.now(tz=utc)
day = timedelta(days=1)
self.failUnlessEqual(
set([oopsid]),
referenced_oops(now - day, now, "product=%(product)s",
{'product': question.product.id}))
self.failUnlessEqual(
set([]),
referenced_oops(now + day, now + day, "product=%(product)s",
{'product': question.product.id}))
def test_oops_in_question_whiteboard(self):
oopsid = "OOPS-abcdef1234"
question = self.factory.makeQuestion()
with person_logged_in(question.owner):
question.whiteboard = "Crash with %s" % oopsid
self.store.flush()
now = datetime.now(tz=utc)
day = timedelta(days=1)
self.failUnlessEqual(
set([oopsid]),
referenced_oops(now - day, now, "product=%(product)s",
{'product': question.product.id}))
self.failUnlessEqual(
set([]),
referenced_oops(now + day, now + day, "product=%(product)s",
{'product': question.product.id}))
def test_oops_in_question_distribution(self):
oopsid = "OOPS-abcdef1234"
distro = self.factory.makeDistribution()
question = self.factory.makeQuestion(target=distro)
with person_logged_in(question.owner):
question.whiteboard = "Crash with %s" % oopsid
self.store.flush()
now = datetime.now(tz=utc)
day = timedelta(days=1)
self.failUnlessEqual(
set([oopsid]),
referenced_oops(now - day, now, "distribution=%(distribution)s",
{'distribution': distro.id}))
self.failUnlessEqual(
set([]),
referenced_oops(now + day, now + day,
"distribution=%(distribution)s", {'distribution': distro.id}))
def test_referenced_oops_in_urls_bug_663249(self):
# Sometimes OOPS ids appear as part of a URL. These should count as
# a reference even though they are not formatted specially - this
# requires somewhat special handling in the reference calculation
# function.
oopsid_old = "OOPS-abcdef1234"
oopsid_new = "OOPS-4321"
bug_old = self.factory.makeBug()
bug_new = self.factory.makeBug()
with person_logged_in(bug_old.owner):
bug_old.description = (
"foo https://lp-oops.canonical.com/oops.py?oopsid=%s bar"
% oopsid_old)
with person_logged_in(bug_new.owner):
bug_new.description = (
"foo https://oops.canonical.com/oops.py?oopsid=%s bar"
% oopsid_new)
self.store.flush()
now = datetime.now(tz=utc)
day = timedelta(days=1)
self.failUnlessEqual(
set([oopsid_old, oopsid_new]),
referenced_oops(now - day, now, "product=1", {}))
self.failUnlessEqual(
set([]),
referenced_oops(now + day, now + day, "product=1", {}))
# ==== source: abramhindle/UnnaturalCodeFork | path: python/testdata/launchpad/lib/lp/registry/tests/test_oopsreferences.py | language: Python | license: agpl-3.0 | size: 7,128 ====
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Mozaik Mass Mailing Access Rights",
"summary": """
New group: Mass Mailing Manager. Managers can edit
and unlink mass mailings.""",
"version": "14.0.1.0.0",
"license": "AGPL-3",
"author": "ACSONE SA/NV",
"website": "https://github.com/OCA/mozaik",
"depends": [
"mass_mailing",
],
"data": [
"security/groups.xml",
"security/ir.model.access.csv",
"views/mailing_mailing.xml",
"views/mail_template.xml",
],
"demo": [],
}
# ==== source: mozaik-association/mozaik | path: mozaik_mass_mailing_access_rights/__manifest__.py | language: Python | license: agpl-3.0 | size: 625 ====
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
# pylint: disable=missing-module-docstring, missing-function-docstring
import typing
import math
import contextlib
from timeit import default_timer
from operator import itemgetter
from searx.engines import engines
from .models import HistogramStorage, CounterStorage
from .error_recorder import count_error, count_exception, errors_per_engines
__all__ = ["initialize",
"get_engines_stats", "get_engine_errors",
"histogram", "histogram_observe", "histogram_observe_time",
"counter", "counter_inc", "counter_add",
"count_error", "count_exception"]
ENDPOINTS = {'search'}
histogram_storage: typing.Optional[HistogramStorage] = None
counter_storage: typing.Optional[CounterStorage] = None
@contextlib.contextmanager
def histogram_observe_time(*args):
h = histogram_storage.get(*args)
before = default_timer()
yield before
duration = default_timer() - before
if h:
h.observe(duration)
else:
raise ValueError("histogram " + repr((*args,)) + " doesn't not exist")
def histogram_observe(duration, *args):
histogram_storage.get(*args).observe(duration)
def histogram(*args, raise_on_not_found=True):
h = histogram_storage.get(*args)
if raise_on_not_found and h is None:
raise ValueError("histogram " + repr((*args,)) + " doesn't not exist")
return h
def counter_inc(*args):
counter_storage.add(1, *args)
def counter_add(value, *args):
counter_storage.add(value, *args)
def counter(*args):
return counter_storage.get(*args)
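# Example usage (illustrative; 'google' stands for any configured engine name):
#   counter_inc('engine', 'google', 'search', 'count', 'sent')
#   counter('engine', 'google', 'search', 'count', 'sent')  # -> running total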
def initialize(engine_names=None):
"""
Initialize metrics
"""
global counter_storage, histogram_storage # pylint: disable=global-statement
counter_storage = CounterStorage()
histogram_storage = HistogramStorage()
# max_timeout = max of all the engine.timeout
max_timeout = 2
for engine_name in (engine_names or engines):
if engine_name in engines:
max_timeout = max(max_timeout, engines[engine_name].timeout)
# histogram configuration
histogram_width = 0.1
histogram_size = int(1.5 * max_timeout / histogram_width)
# engines
for engine_name in (engine_names or engines):
# search count
counter_storage.configure('engine', engine_name, 'search', 'count', 'sent')
counter_storage.configure('engine', engine_name, 'search', 'count', 'successful')
# global counter of errors
counter_storage.configure('engine', engine_name, 'search', 'count', 'error')
# score of the engine
counter_storage.configure('engine', engine_name, 'score')
# result count per requests
histogram_storage.configure(1, 100, 'engine', engine_name, 'result', 'count')
# time doing HTTP requests
histogram_storage.configure(histogram_width, histogram_size, 'engine', engine_name, 'time', 'http')
# total time
# .time.request and ...response times may overlap .time.http time.
histogram_storage.configure(histogram_width, histogram_size, 'engine', engine_name, 'time', 'total')
def get_engine_errors(engline_name_list):
result = {}
engine_names = list(errors_per_engines.keys())
engine_names.sort()
for engine_name in engine_names:
if engine_name not in engline_name_list:
continue
error_stats = errors_per_engines[engine_name]
sent_search_count = max(counter('engine', engine_name, 'search', 'count', 'sent'), 1)
sorted_context_count_list = sorted(error_stats.items(), key=lambda context_count: context_count[1])
r = []
for context, count in sorted_context_count_list:
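# Express the error frequency as a percentage of the sent requests,
# bucketed to the nearest 5% step.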
percentage = round(20 * count / sent_search_count) * 5
r.append({
'filename': context.filename,
'function': context.function,
'line_no': context.line_no,
'code': context.code,
'exception_classname': context.exception_classname,
'log_message': context.log_message,
'log_parameters': context.log_parameters,
'secondary': context.secondary,
'percentage': percentage,
})
result[engine_name] = sorted(r, reverse=True, key=lambda d: d['percentage'])
return result
def get_reliabilities(engline_name_list, checker_results):
reliabilities = {}
engine_errors = get_engine_errors(engline_name_list)
for engine_name in engline_name_list:
checker_result = checker_results.get(engine_name, {})
checker_success = checker_result.get('success', True)
errors = engine_errors.get(engine_name) or []
if counter('engine', engine_name, 'search', 'count', 'sent') == 0:
# no request
reliablity = None
elif checker_success and not errors:
reliablity = 100
elif 'simple' in checker_result.get('errors', {}):
# the basic (simple) test doesn't work: the engine is broken according to the checker
# even if there is no exception
reliablity = 0
else:
reliablity = 100 - sum([error['percentage'] for error in errors if not error.get('secondary')])
reliabilities[engine_name] = {
'reliablity': reliablity,
'errors': errors,
'checker': checker_results.get(engine_name, {}).get('errors', {}),
}
return reliabilities
def get_engines_stats(engine_name_list):
assert counter_storage is not None
assert histogram_storage is not None
list_time = []
max_time_total = max_result_count = None
for engine_name in engine_name_list:
sent_count = counter('engine', engine_name, 'search', 'count', 'sent')
if sent_count == 0:
continue
result_count = histogram('engine', engine_name, 'result', 'count').percentage(50)
result_count_sum = histogram('engine', engine_name, 'result', 'count').sum
successful_count = counter('engine', engine_name, 'search', 'count', 'successful')
time_total = histogram('engine', engine_name, 'time', 'total').percentage(50)
max_time_total = max(time_total or 0, max_time_total or 0)
max_result_count = max(result_count or 0, max_result_count or 0)
stats = {
'name': engine_name,
'total': None,
'total_p80': None,
'total_p95': None,
'http': None,
'http_p80': None,
'http_p95': None,
'processing': None,
'processing_p80': None,
'processing_p95': None,
'score': 0,
'score_per_result': 0,
'result_count': result_count,
}
if successful_count and result_count_sum:
score = counter('engine', engine_name, 'score')
stats['score'] = score
stats['score_per_result'] = score / float(result_count_sum)
time_http = histogram('engine', engine_name, 'time', 'http').percentage(50)
time_http_p80 = time_http_p95 = 0
if time_http is not None:
time_http_p80 = histogram('engine', engine_name, 'time', 'http').percentage(80)
time_http_p95 = histogram('engine', engine_name, 'time', 'http').percentage(95)
stats['http'] = round(time_http, 1)
stats['http_p80'] = round(time_http_p80, 1)
stats['http_p95'] = round(time_http_p95, 1)
if time_total is not None:
time_total_p80 = histogram('engine', engine_name, 'time', 'total').percentage(80)
time_total_p95 = histogram('engine', engine_name, 'time', 'total').percentage(95)
stats['total'] = round(time_total, 1)
stats['total_p80'] = round(time_total_p80, 1)
stats['total_p95'] = round(time_total_p95, 1)
stats['processing'] = round(time_total - (time_http or 0), 1)
stats['processing_p80'] = round(time_total_p80 - time_http_p80, 1)
stats['processing_p95'] = round(time_total_p95 - time_http_p95, 1)
list_time.append(stats)
return {
'time': list_time,
'max_time': math.ceil(max_time_total or 0),
'max_result_count': math.ceil(max_result_count or 0),
}
# ==== source: dalf/searx | path: searx/metrics/__init__.py | language: Python | license: agpl-3.0 | size: 8,348 ====
#!/usr/bin/env python
# encoding: utf-8
import os
import geoip2.database
from geoip2.errors import AddressNotFoundError
from cortexutils.analyzer import Analyzer
class MaxMindAnalyzer(Analyzer):
def dump_city(self, city):
return {
'confidence': city.confidence,
'geoname_id': city.geoname_id,
'name': city.name,
'names': city.names
}
def dump_continent(self, continent):
return {
'code': continent.code,
'geoname_id': continent.geoname_id,
'name': continent.name,
'names': continent.names,
}
def dump_country(self, country):
return {
'confidence': country.confidence,
'geoname_id': country.geoname_id,
'iso_code': country.iso_code,
'name': country.name,
'names': country.names
}
def dump_location(self, location):
return {
'accuracy_radius': location.accuracy_radius,
'latitude': location.latitude,
'longitude': location.longitude,
'metro_code': location.metro_code,
'time_zone': location.time_zone
}
def dump_traits(self, traits):
return {
'autonomous_system_number': traits.autonomous_system_number,
'autonomous_system_organization': traits.autonomous_system_organization,
'domain': traits.domain,
'ip_address': traits.ip_address,
'is_anonymous_proxy': traits.is_anonymous_proxy,
'is_satellite_provider': traits.is_satellite_provider,
'isp': traits.isp,
'organization': traits.organization,
'user_type': traits.user_type
}
def summary(self, raw):
taxonomies = []
level = "info"
namespace = "MaxMind"
predicate = "Location"
if "continent" in raw:
value = "{}/{}".format(raw["country"]["name"], raw["continent"]["name"])
taxonomies.append(self.build_taxonomy(level, namespace, predicate, value))
return {"taxonomies": taxonomies}
def run(self):
Analyzer.run(self)
if self.data_type == 'ip':
try:
data = self.get_data()
city = geoip2.database.Reader(os.path.dirname(__file__) + '/GeoLite2-City.mmdb').city(data)
self.report({
'city': self.dump_city(city.city),
'continent': self.dump_continent(city.continent),
'country': self.dump_country(city.country),
'location': self.dump_location(city.location),
'registered_country': self.dump_country(city.registered_country),
'represented_country': self.dump_country(city.represented_country),
'subdivisions': self.dump_country(city.subdivisions.most_specific),
'traits': self.dump_traits(city.traits)
})
except ValueError as e:
self.error('Invalid IP address')
except AddressNotFoundError as e:
self.error('Unknown IP address')
except Exception as e:
self.unexpectedError(type(e))
else:
self.notSupported()
if __name__ == '__main__':
MaxMindAnalyzer().run()
# ==== source: CERT-BDF/Cortex-Analyzers | path: analyzers/MaxMind/geo.py | language: Python | license: agpl-3.0 | size: 3,380 ====
from contextlib import suppress
from ereuse_devicehub.resources.account.domain import AccountDomain, UserNotFound
from ereuse_devicehub.resources.device.domain import DeviceDomain
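# Event hooks for Deallocate: remove the previous owner from the affected
# devices (and their components) via a MongoDB-style '$pull' update, and
# materialize the owner's organization on the event when it can be resolved.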
def materialize_actual_owners_remove(events: list):
for event in events:
properties = {'$pull': {'owners': event['from']}}
DeviceDomain.update_raw(event.get('components', []), properties)
return DeviceDomain.update_raw(event['devices'], properties)
def set_organization(deallocates: list):
for deallocate in deallocates:
with suppress(UserNotFound, KeyError): # todo ensure organization is not always needed
deallocate['fromOrganization'] = AccountDomain.get_one(deallocate['from'])['organization']
# ==== source: eReuse/DeviceHub | path: ereuse_devicehub/resources/event/device/deallocate/hooks.py | language: Python | license: agpl-3.0 | size: 737 ====
#!/usr/bin/env python
# UPE Programming competiton 2015-11-1 "railforest" solution by Avi Weinstock
import re
import sys
INFINITY = float('inf')
line_to_ints = lambda line: [int(x, 10) for x in re.split(' +', line.strip())]
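# Dynamic programming over rows: prevs[x] holds the cheapest cost (and path)
# to reach column x on the previous row; a move goes one row down and at most
# one column sideways, and a negative cell is treated as impassable.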
def solver(width, height, grid):
prevs = [[0, list()] for _ in range(width)]
for (y, line) in enumerate(grid):
#print(line)
currents = []
for (x,cost) in enumerate(line):
if cost < 0:
cost = INFINITY
cur = [INFINITY, []]
for dx in [-1,0,1]:
if x+dx < 0 or x+dx >= width:
continue
#print(x,x+dx)
tmp = prevs[x+dx]
if tmp[0]+cost < cur[0]:
cur[0] = tmp[0] + cost
cur[1] = tmp[1] + [x]
currents.append(cur)
prevs = currents
solution = min([x for (x,_) in prevs])
if solution == INFINITY:
solution = -1
return solution
def main():
lines = sys.stdin.read().split('\n')
(height, width) = line_to_ints(lines[0])
#print(height,width)
grid = [line_to_ints(line) for line in lines[1:height+1]]
print(solver(width, height, grid))
if __name__ == '__main__':
main()
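# Worked example (illustrative input on stdin):
#   2 3
#   1 2 3
#   4 -1 6
# The cheapest top-to-bottom path is 1 -> 4, so the program prints 5.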
# ==== source: aweinstock314/aweinstock-programming-competition-solutions | path: upe_competition_2015_11_01/railforest/railforest_weinsa.py | language: Python | license: agpl-3.0 | size: 1,246 ====
# -*- coding: utf-8 -*- pylint: disable-msg=R0801
#
# Copyright (c) 2013 Rodolphe Quiédeville <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
API definition
"""
from tastypie import fields
from tastypie.resources import ModelResource
from tastypie.throttle import BaseThrottle
from cotetra.survey.models import Journey, Connection
from cotetra.network.api import StationResource
class JourneyResource(ModelResource):
"""
The journeys
"""
station_from = fields.ForeignKey(StationResource, 'station_from')
station_to = fields.ForeignKey(StationResource, 'station_to')
class Meta:
queryset = Journey.objects.all()
resource_name = 'journey'
throttle = BaseThrottle(throttle_at=100, timeframe=60)
class ConnectionResource(ModelResource):
"""
The connections
"""
station_from = fields.ForeignKey(StationResource, 'station_from')
station_to = fields.ForeignKey(StationResource, 'station_to')
class Meta:
queryset = Connection.objects.all()
resource_name = 'connection'
throttle = BaseThrottle(throttle_at=100, timeframe=60)
# ==== source: rodo/cotetra | path: cotetra/survey/api.py | language: Python | license: agpl-3.0 | size: 1,782 ====
# These are the instance-dependent settings. Copy this file to
# secrets.py and apply the desired settings.
#
# Only one variable is required here, SECRET_KEY. Fill this using:
# http://www.miniwebtool.com/django-secret-key-generator/
SECRET_KEY = ''
# In your development setup, you can leave the following variables
# unset:
#STATIC_ROOT =
#MEDIA_ROOT =
#DEBUG =
#DATABASES =
#EMAIL_BACKEND =
#EMAIL_USE_TLS =
#EMAIL_HOST =
#EMAIL_PORT =
#EMAIL_HOST_USER =
#EMAIL_HOST_PASSWORD =
#SESSION_COOKIE_DOMAIN =
#CSRF_COOKIE_DOMAIN =
#SECURE_HSTS_SECONDS =
# ==== source: SAlkhairy/trabd | path: trabd/secrets.template.py | language: Python | license: agpl-3.0 | size: 557 ====
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, an open source suite of business apps
# This module copyright (C) 2015 bloopark systems (<http://bloopark.de>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import json
import xml.etree.ElementTree as ET
import urllib2
import werkzeug.utils
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.controllers.main import Website
class Website(Website):
@http.route(['/<path:seo_url>'], type='http', auth="public", website=True)
def path_page(self, seo_url, **kwargs):
"""Handle SEO urls for ir.ui.views.
ToDo: Add additional check for field seo_url_parent. Otherwise it is
possible to use invalid url structures. For example: if you have two
pages 'study-1' and 'study-2' with the same seo_url_level and different
seo_url_parent you can use '/ecommerce/study-1/how-to-do-it-right' and
'/ecommerce/study-2/how-to-do-it-right' to call the page
'how-to-do-it-right'.
"""
env = request.env(context=request.context)
seo_url_parts = [s.encode('utf8') for s in seo_url.split('/')
if s != '']
views = env['ir.ui.view'].search([('seo_url', 'in', seo_url_parts)],
order='seo_url_level ASC')
page = 'website.404'
if len(seo_url_parts) == len(views):
seo_url_check = [v.seo_url.encode('utf8') for v in views]
current_view = views[-1]
if (seo_url_parts == seo_url_check
and (current_view.seo_url_level + 1) == len(views)):
page = current_view.xml_id
if page == 'website.404':
try:
url = self.look_for_redirect_url(seo_url, **kwargs)
if url:
return request.redirect(url, code=301)
assert url is not None
except Exception, e:
return request.registry['ir.http']._handle_exception(e, 404)
if page == 'website.404' and request.website.is_publisher():
page = 'website.page_404'
return request.render(page, {})
def look_for_redirect_url(self, seo_url, **kwargs):
env = request.env(context=request.context)
if not seo_url.startswith('/'):
seo_url = '/' + seo_url
lang = env.context.get('lang', False)
if not lang:
lang = request.website.default_lang_code
lang = env['res.lang'].get_code_from_alias(lang)
domain = [('url', '=', seo_url), ('lang', '=', lang)]
data = env['website.seo.redirect'].search(domain)
if data:
model, rid = data[0].resource.split(',')
resource = env[model].browse(int(rid))
return resource.get_seo_path()[0]
@http.route()
def page(self, page, **opt):
try:
view = request.website.get_template(page)
if view.seo_url:
return request.redirect(view.get_seo_path()[0], code=301)
except:
pass
return super(Website, self).page(page, **opt)
@http.route(['/website/seo_suggest'], type='json', auth='user', website=True)
def seo_suggest(self, keywords=None, lang=None):
url = "http://google.com/complete/search"
try:
params = {
'ie': 'utf8',
'oe': 'utf8',
'output': 'toolbar',
'q': keywords,
}
if lang:
language = lang.split("_")
params.update({
'hl': language[0],
'gl': language[1] if len(language) > 1 else ''
})
req = urllib2.Request("%s?%s" % (url, werkzeug.url_encode(params)))
response = urllib2.urlopen(req)  # renamed from 'request' to avoid shadowing the imported request proxy
except (urllib2.HTTPError, urllib2.URLError):
# TODO: shouldn't this return {} ?
return []
xmlroot = ET.fromstring(response.read())
return [sugg[0].attrib['data'] for sugg in xmlroot if len(sugg) and sugg[0].attrib['data']]
# ==== source: blooparksystems/website | path: website_seo/controllers/main.py | language: Python | license: agpl-3.0 | size: 4,887 ====
def test_imprint(app, client):
app.config["SKYLINES_IMPRINT"] = u"foobar"
res = client.get("/imprint")
assert res.status_code == 200
assert res.json == {u"content": u"foobar"}
def test_team(client):
res = client.get("/team")
assert res.status_code == 200
content = res.json["content"]
assert "## Developers" in content
assert "* Tobias Bieniek (<[email protected]> // maintainer)\n" in content
assert "## Developers" in content
def test_license(client):
res = client.get("/license")
assert res.status_code == 200
content = res.json["content"]
assert "GNU AFFERO GENERAL PUBLIC LICENSE" in content
# ==== source: skylines-project/skylines | path: tests/api/views/about_test.py | language: Python | license: agpl-3.0 | size: 664 ====
"""
Django settings for kore project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9j++(0=dc&6w&113d4bofcjy1xy-pe$frla&=s*8w94=0ym0@&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'grappelli',
'nested_admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'raven.contrib.django.raven_compat',
'django_extensions',
'rest_framework',
'corsheaders',
'modeltranslation',
'leaflet',
'munigeo',
'schools',
'django_filters'
]
if DEBUG:
# INSTALLED_APPS.insert(0, 'devserver')
# INSTALLED_APPS.insert(0, 'debug_toolbar')
pass
MIDDLEWARE_CLASSES = (
'django.middleware.locale.LocaleMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'kore.urls'
WSGI_APPLICATION = 'kore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'kore',
}
}
# Munigeo
# https://github.com/City-of-Helsinki/munigeo
PROJECTION_SRID = 3067
# If no country specified (for example through a REST API call), use this
# as default.
DEFAULT_COUNTRY = 'fi'
# The word used for municipality in the OCD identifiers in the default country.
DEFAULT_OCD_MUNICIPALITY = 'kunta'
BOUNDING_BOX = [-548576, 6291456, 1548576, 8388608]
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
gettext = lambda s: s
LANGUAGES = (
('fi', gettext('Finnish')),
('sv', gettext('Swedish')),
('en', gettext('English')),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "var", "static")
LOCALE_PATH = os.path.join(BASE_DIR, "schools", "locale")
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 20,
'MAX_PAGINATE_BY': 1000, # Maximum limit allowed when using `?page_size=xxx`.
'DEFAULT_FILTER_BACKENDS':
('rest_framework.filters.DjangoFilterBackend',),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
)
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CORS_ORIGIN_ALLOW_ALL = True
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from local_settings import *
except ImportError:
pass
# ==== source: City-of-Helsinki/kore | path: kore/settings.py | language: Python | license: agpl-3.0 | size: 4,485 ====
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Mozaik Website Event Track",
"summary": """
This module allows to see the event menu configuration
even without activated debug mode""",
"version": "14.0.1.0.0",
"license": "AGPL-3",
"author": "ACSONE SA/NV",
"website": "https://github.com/OCA/mozaik",
"depends": [
# Odoo
"website_event_track",
],
"data": [
"views/event_event.xml",
],
}
# ==== source: mozaik-association/mozaik | path: mozaik_website_event_track/__manifest__.py | language: Python | license: agpl-3.0 | size: 525 ====
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Mappers
=======
Mappers are the ConnectorUnit classes responsible to transform
external records into OpenERP records and conversely.
"""
import logging
from collections import namedtuple
from contextlib import contextmanager
from ..connector import ConnectorUnit, MetaConnectorUnit, ConnectorEnvironment
from ..exception import MappingError, NoConnectorUnitError
_logger = logging.getLogger(__name__)
def mapping(func):
""" Declare that a method is a mapping method.
It is then used by the :py:class:`Mapper` to convert the records.
Usage::
@mapping
def any(self, record):
return {'output_field': record['input_field']}
"""
func.is_mapping = True
return func
def changed_by(*args):
""" Decorator for the mapping methods (:py:func:`mapping`)
When fields are modified in OpenERP, we want to export only the
modified fields. Using this decorator, we can specify which fields
updates should trigger which mapping method.
If ``changed_by`` is empty, the mapping is always active.
As far as possible, this decorator should be used for the exports,
thus, when we do an update on only a small number of fields on a
record, the size of the output record will be limited to only the
fields really having to be exported.
Usage::
@changed_by('input_field')
@mapping
def any(self, record):
return {'output_field': record['input_field']}
:param *args: field names which trigger the mapping when modified
"""
def register_mapping(func):
func.changed_by = args
return func
return register_mapping
def only_create(func):
""" Decorator for the mapping methods (:py:func:`mapping`)
A mapping decorated with ``only_create`` means that it has to be
used only for the creation of the records.
Usage::
@only_create
@mapping
def any(self, record):
return {'output_field': record['input_field']}
"""
func.only_create = True
return func
def none(field):
""" A modifier intended to be used on the ``direct`` mappings.
Replace the False-ish values by None.
It can be used in a pipeline of modifiers, wrapping another modifier.
Example::
direct = [(none('source'), 'target'),
(none(m2o_to_backend('rel_id')), 'rel_id')]
:param field: name of the source field in the record, or a modifier to wrap
"""
def modifier(self, record, to_attr):
if callable(field):
result = field(self, record, to_attr)
else:
result = record[field]
if not result:
return None
return result
return modifier
def convert(field, conv_type):
""" A modifier intended to be used on the ``direct`` mappings.
Convert a field's value to a given type.
Example::
direct = [(convert('source', str), 'target')]
:param field: name of the source field in the record
:param conv_type: the type to which the value is converted
"""
def modifier(self, record, to_attr):
value = record[field]
if not value:
return False
return conv_type(value)
return modifier
def m2o_to_backend(field, binding=None):
""" A modifier intended to be used on the ``direct`` mappings.
For a many2one, get the ID on the backend and return it.
When the field's relation is not a binding (i.e. it does not point to
something like ``magento.*``), the binding model needs to be provided
in the ``binding`` keyword argument.
Example::
direct = [(m2o_to_backend('country_id', binding='magento.res.country'),
'country'),
(m2o_to_backend('magento_country_id'), 'country')]
:param field: name of the source field in the record
:param binding: name of the binding model is the relation is not a binding
"""
def modifier(self, record, to_attr):
if not record[field]:
return False
column = self.model._fields[field]
if column.type != 'many2one':
raise ValueError('The column %s should be a Many2one, got %s' %
(field, type(column)))
rel_id = record[field].id
if binding is None:
binding_model = column.comodel_name
else:
binding_model = binding
binder = self.binder_for(binding_model)
# if a relation is not a binding, we wrap the record in the
# binding, we'll return the id of the binding
wrap = bool(binding)
value = binder.to_backend(rel_id, wrap=wrap)
if not value:
raise MappingError("Can not find an external id for record "
"%s in model %s %s wrapping" %
(rel_id, binding_model,
'with' if wrap else 'without'))
return value
return modifier
def backend_to_m2o(field, binding=None, with_inactive=False):
""" A modifier intended to be used on the ``direct`` mappings.
For a field from a backend which is an ID, search the corresponding
    binding in OpenERP and return its ID.
When the field's relation is not a binding (i.e. it does not point to
something like ``magento.*``), the binding model needs to be provided
in the ``binding`` keyword argument.
Example::
direct = [(backend_to_m2o('country', binding='magento.res.country'),
'country_id'),
(backend_to_m2o('country'), 'magento_country_id')]
:param field: name of the source field in the record
    :param binding: name of the binding model if the relation is not a binding
:param with_inactive: include the inactive records in OpenERP in the search
"""
def modifier(self, record, to_attr):
if not record[field]:
return False
column = self.model._fields[to_attr]
if column.type != 'many2one':
raise ValueError('The column %s should be a Many2one, got %s' %
(to_attr, type(column)))
rel_id = record[field]
if binding is None:
binding_model = column.comodel_name
else:
binding_model = binding
binder = self.binder_for(binding_model)
# if we want the ID of a normal record, not a binding,
        # we ask the binder for the unwrapped id
unwrap = bool(binding)
with self.session.change_context(active_test=False):
value = binder.to_openerp(rel_id, unwrap=unwrap)
if not value:
raise MappingError("Can not find an existing %s for external "
"record %s %s unwrapping" %
(binding_model, rel_id,
'with' if unwrap else 'without'))
return value
return modifier
MappingDefinition = namedtuple('MappingDefinition',
['changed_by',
'only_create'])
class MetaMapper(MetaConnectorUnit):
""" Metaclass for Mapper
    Build a ``_map_methods`` dict of mapping methods.
    The keys of the dict are the method names.
    The values of the dict are :py:class:`MappingDefinition` namedtuples
    holding the ``changed_by`` field set and the ``only_create`` flag.
"""
def __new__(meta, name, bases, attrs):
if attrs.get('_map_methods') is None:
attrs['_map_methods'] = {}
cls = super(MetaMapper, meta).__new__(meta, name, bases, attrs)
# When a class has several bases: ``class Mapper(Base1, Base2):``
for base in bases:
# Merge the _map_methods of the bases
base_map_methods = getattr(base, '_map_methods', {})
for attr_name, definition in base_map_methods.iteritems():
if cls._map_methods.get(attr_name) is None:
cls._map_methods[attr_name] = definition
else:
# Update the existing @changed_by with the content
# of each base (it is mutated in place).
# @only_create keeps the value defined in the first
# base.
mapping_changed_by = cls._map_methods[attr_name].changed_by
mapping_changed_by.update(definition.changed_by)
# Update the _map_methods from the @mapping methods in attrs,
# respecting the class tree.
for attr_name, attr in attrs.iteritems():
is_mapping = getattr(attr, 'is_mapping', None)
if is_mapping:
has_only_create = getattr(attr, 'only_create', False)
mapping_changed_by = set(getattr(attr, 'changed_by', ()))
# If already existing, it has been defined in a super
# class, extend the @changed_by set
if cls._map_methods.get(attr_name) is not None:
definition = cls._map_methods[attr_name]
mapping_changed_by.update(definition.changed_by)
# keep the last choice for only_create
definition = MappingDefinition(mapping_changed_by,
has_only_create)
cls._map_methods[attr_name] = definition
return cls
def __init__(cls, name, bases, attrs):
"""
        Build a ``_changed_by_fields`` set of the fields which trigger the
        mapper when they are modified.
        It takes into account the source fields of the ``direct`` mappings
        and the fields declared in the ``changed_by`` decorator.
"""
changed_by_fields = set()
if attrs.get('direct'):
for from_attr, __ in attrs['direct']:
attr_name = cls._direct_source_field_name(from_attr)
changed_by_fields.add(attr_name)
for method_name, method_def in attrs['_map_methods'].iteritems():
changed_by_fields |= method_def[0]
for base in bases:
if hasattr(base, '_changed_by_fields') and base._changed_by_fields:
changed_by_fields |= base._changed_by_fields
cls._changed_by_fields = changed_by_fields
super(MetaMapper, cls).__init__(name, bases, attrs)
@staticmethod
def _direct_source_field_name(mapping_attr):
""" Get the mapping field name. Goes through the function modifiers.
Ex: [(none(convert(field_name, str)), out_field_name)]
It assumes that the modifier has ``field`` as first argument like:
def modifier(field, args):
"""
attr_name = mapping_attr
if callable(mapping_attr):
# Map the closure entries with variable names
cells = dict(zip(
mapping_attr.func_code.co_freevars,
(c.cell_contents for c in mapping_attr.func_closure)))
assert 'field' in cells, "Modifier without 'field' argument."
if callable(cells['field']):
attr_name = MetaMapper._direct_source_field_name(
cells['field'])
else:
attr_name = cells['field']
return attr_name
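    # Illustration: for a ``direct`` entry such as
    #   (none(convert('price', float)), 'price')
    # the outer ``none`` closure exposes a free variable named 'field'
    # bound to the ``convert`` modifier, which is unwrapped recursively
    # until the plain source field name 'price' is reached.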
class MapChild(ConnectorUnit):
""" MapChild is responsible to convert items.
Items are sub-records of a main record.
In this example, the items are the records in ``lines``::
sales = {'name': 'SO10',
'lines': [{'product_id': 1, 'quantity': 2},
{'product_id': 2, 'quantity': 2}]}
A MapChild is always called from another :py:class:`Mapper` which
provides a ``children`` configuration.
Considering the example above, the "main" :py:class:`Mapper` would
    return something as follows::
{'name': 'SO10',
'lines': [(0, 0, {'product_id': 11, 'quantity': 2}),
(0, 0, {'product_id': 12, 'quantity': 2})]}
A MapChild is responsible to:
* Find the :py:class:`Mapper` to convert the items
* Possibly filter out some lines (can be done by inheriting
:py:meth:`skip_item`)
* Convert the items' records using the found :py:class:`Mapper`
* Format the output values to the format expected by OpenERP or the
      backend (as seen above with ``(0, 0, {values})``)
A MapChild can be extended like any other
:py:class:`~connector.connector.ConnectorUnit`.
    However, it is not mandatory to explicitly create a MapChild for
    each ``children`` mapping; the default one will be used
    (:py:class:`ImportMapChild` or :py:class:`ExportMapChild`).
    The default implementation does not take care of updates: if a
    sales order is imported twice, its lines will be duplicated. This
    is not a problem as long as the import only needs to support
    creation (typical for sales orders). Update handling can be added on a
case-by-case basis by inheriting :py:meth:`get_item_values` and
:py:meth:`format_items`.
"""
_model_name = None
def _child_mapper(self):
raise NotImplementedError
def skip_item(self, map_record):
""" Hook to implement in sub-classes when some child
records should be skipped.
The parent record is accessible in ``map_record``.
If it returns True, the current child record is skipped.
:param map_record: record that we are converting
:type map_record: :py:class:`MapRecord`
"""
return False
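    # A hedged override sketch (hypothetical backend key 'is_deleted'):
    # skip child lines that the backend marks as deleted.
    #
    #     def skip_item(self, map_record):
    #         return map_record.source.get('is_deleted', False)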
def get_items(self, items, parent, to_attr, options):
""" Returns the formatted output values of items from a main record
:param items: list of item records
:type items: list
:param parent: parent record
:param to_attr: destination field (can be used for introspecting
the relation)
:type to_attr: str
        :param options: dict of options, inherited from the main mapper
:return: formatted output values for the item
"""
mapper = self._child_mapper()
mapped = []
for item in items:
map_record = mapper.map_record(item, parent=parent)
if self.skip_item(map_record):
continue
mapped.append(self.get_item_values(map_record, to_attr, options))
return self.format_items(mapped)
def get_item_values(self, map_record, to_attr, options):
""" Get the raw values from the child Mappers for the items.
It can be overridden for instance to:
* Change options
* Use a :py:class:`~connector.connector.Binder` to know if an
item already exists to modify an existing item, rather than to
add it
:param map_record: record that we are converting
:type map_record: :py:class:`MapRecord`
:param to_attr: destination field (can be used for introspecting
the relation)
:type to_attr: str
        :param options: dict of options, inherited from the main mapper
"""
return map_record.values(**options)
def format_items(self, items_values):
""" Format the values of the items mapped from the child Mappers.
It can be overridden for instance to add the OpenERP
relationships commands ``(6, 0, [IDs])``, ...
        For instance, it can be modified to handle updates of existing
        items: check if an ``id`` has been defined by
        :py:meth:`get_item_values`, then use the ``(1, ID, {values})``
        command
:param items_values: mapped values for the items
:type items_values: list
"""
return items_values
class ImportMapChild(MapChild):
""" :py:class:`MapChild` for the Imports """
def _child_mapper(self):
return self.unit_for(ImportMapper)
def format_items(self, items_values):
""" Format the values of the items mapped from the child Mappers.
It can be overridden for instance to add the OpenERP
relationships commands ``(6, 0, [IDs])``, ...
        For instance, it can be modified to handle updates of existing
        items: check if an ``id`` has been defined by
        :py:meth:`get_item_values`, then use the ``(1, ID, {values})``
        command
:param items_values: list of values for the items to create
:type items_values: list
"""
return [(0, 0, values) for values in items_values]
class ExportMapChild(MapChild):
""" :py:class:`MapChild` for the Exports """
def _child_mapper(self):
return self.unit_for(ExportMapper)
class Mapper(ConnectorUnit):
""" A Mapper translates an external record to an OpenERP record and
conversely. The output of a Mapper is a ``dict``.
3 types of mappings are supported:
Direct Mappings
Example::
direct = [('source', 'target')]
Here, the ``source`` field will be copied in the ``target`` field.
A modifier can be used in the source item.
The modifier will be applied to the source field before being
copied in the target field.
It should be a closure function respecting this idiom::
def a_function(field):
''' ``field`` is the name of the source field.
Naming the arg: ``field`` is required for the conversion'''
def modifier(self, record, to_attr):
''' self is the current Mapper,
record is the current record to map,
to_attr is the target field'''
return record[field]
return modifier
And used like that::
direct = [
(a_function('source'), 'target'),
]
A more concrete example of modifier::
def convert(field, conv_type):
''' Convert the source field to a defined ``conv_type``
(ex. str) before returning it'''
def modifier(self, record, to_attr):
value = record[field]
if not value:
return None
return conv_type(value)
return modifier
And used like that::
direct = [
(convert('myfield', float), 'target_field'),
]
More examples of modifiers:
* :py:func:`convert`
* :py:func:`m2o_to_backend`
* :py:func:`backend_to_m2o`
Method Mappings
        A mapping method allows executing arbitrary code and returning one
or many fields::
@mapping
def compute_state(self, record):
# compute some state, using the ``record`` or not
state = 'pending'
return {'state': state}
        We can also specify that a mapping method should be applied
        only when an object is created, and never applied on further
updates::
@only_create
@mapping
def default_warehouse(self, record):
# get default warehouse
warehouse_id = ...
return {'warehouse_id': warehouse_id}
Submappings
When a record contains sub-items, like the lines of a sales order,
we can convert the children using another Mapper::
children = [('items', 'line_ids', 'model.name')]
        This allows creating the sales order and all its lines with the
same call to :py:meth:`openerp.models.BaseModel.create()`.
When using ``children`` for items of a record, we need to create
a :py:class:`Mapper` for the model of the items, and optionally a
:py:class:`MapChild`.
Usage of a Mapper::
mapper = Mapper(env)
map_record = mapper.map_record(record)
values = map_record.values()
values = map_record.values(only_create=True)
values = map_record.values(fields=['name', 'street'])
"""
__metaclass__ = MetaMapper
# name of the OpenERP model, to be defined in concrete classes
_model_name = None
direct = [] # direct conversion of a field to another (from_attr, to_attr)
children = [] # conversion of sub-records (from_attr, to_attr, model)
_map_methods = None
_map_child_class = None
def __init__(self, connector_env):
"""
:param connector_env: current environment (backend, session, ...)
        :type connector_env: :py:class:`connector.connector.ConnectorEnvironment`
"""
super(Mapper, self).__init__(connector_env)
self._options = None
def _map_direct(self, record, from_attr, to_attr):
""" Apply the ``direct`` mappings.
:param record: record to convert from a source to a target
:param from_attr: name of the source attribute or a callable
:type from_attr: callable | str
:param to_attr: name of the target attribute
:type to_attr: str
"""
raise NotImplementedError
def _map_children(self, record, attr, model):
raise NotImplementedError
@property
def map_methods(self):
""" Yield all the methods decorated with ``@mapping`` """
for meth, definition in self._map_methods.iteritems():
yield getattr(self, meth), definition
def _get_map_child_unit(self, model_name):
try:
mapper_child = self.unit_for(self._map_child_class,
model=model_name)
except NoConnectorUnitError:
# does not force developers to use a MapChild ->
            # will use the default one if not explicitly defined
env = ConnectorEnvironment(self.backend_record,
self.session,
model_name)
mapper_child = self._map_child_class(env)
return mapper_child
def _map_child(self, map_record, from_attr, to_attr, model_name):
""" Convert items of the record as defined by children """
assert self._map_child_class is not None, "_map_child_class required"
child_records = map_record.source[from_attr]
mapper_child = self._get_map_child_unit(model_name)
items = mapper_child.get_items(child_records, map_record,
to_attr, options=self.options)
return items
@contextmanager
def _mapping_options(self, options):
""" Change the mapping options for the Mapper.
Context Manager to use in order to alter the behavior
of the mapping, when using ``_apply`` or ``finalize``.
"""
current = self._options
self._options = options
yield
self._options = current
@property
def options(self):
""" Options can be accessed in the mapping methods with
``self.options``. """
return self._options
def map_record(self, record, parent=None):
""" Get a :py:class:`MapRecord` with record, ready to be
converted using the current Mapper.
:param record: record to transform
:param parent: optional parent record, for items
"""
return MapRecord(self, record, parent=parent)
def _apply(self, map_record, options=None):
""" Apply the mappings on a :py:class:`MapRecord`
:param map_record: source record to convert
:type map_record: :py:class:`MapRecord`
"""
if options is None:
options = {}
with self._mapping_options(options):
return self._apply_with_options(map_record)
def _apply_with_options(self, map_record):
""" Apply the mappings on a :py:class:`MapRecord` with
contextual options (the ``options`` given in
:py:meth:`MapRecord.values()` are accessible in
``self.options``)
:param map_record: source record to convert
:type map_record: :py:class:`MapRecord`
"""
assert self.options is not None, (
"options should be defined with '_mapping_options'")
_logger.debug('converting record %s to model %s',
map_record.source, self.model)
fields = self.options.fields
for_create = self.options.for_create
result = {}
for from_attr, to_attr in self.direct:
if callable(from_attr):
attr_name = MetaMapper._direct_source_field_name(from_attr)
else:
attr_name = from_attr
if (not fields or attr_name in fields):
value = self._map_direct(map_record.source,
from_attr,
to_attr)
result[to_attr] = value
for meth, definition in self.map_methods:
mapping_changed_by = definition.changed_by
if (not fields or not mapping_changed_by or
mapping_changed_by.intersection(fields)):
if definition.only_create and not for_create:
continue
values = meth(map_record.source)
if not values:
continue
if not isinstance(values, dict):
raise ValueError('%s: invalid return value for the '
'mapping method %s' % (values, meth))
result.update(values)
for from_attr, to_attr, model_name in self.children:
if (not fields or from_attr in fields):
result[to_attr] = self._map_child(map_record, from_attr,
to_attr, model_name)
return self.finalize(map_record, result)
def finalize(self, map_record, values):
""" Called at the end of the mapping.
        Can be used to modify the values before returning them, for
        example to apply the equivalent of the ``on_change`` methods.
:param map_record: source map_record
:type map_record: :py:class:`MapRecord`
:param values: mapped values
:returns: mapped values
:rtype: dict
"""
return values
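    # A hedged override sketch (hypothetical field 'company_id'): a
    # concrete mapper could complete the mapped values here before they
    # are written.
    #
    #     def finalize(self, map_record, values):
    #         values.setdefault('company_id',
    #                           self.backend_record.company_id.id)
    #         return values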
class ImportMapper(Mapper):
""" :py:class:`Mapper` for imports.
Transform a record from a backend to an OpenERP record
"""
_map_child_class = ImportMapChild
def _map_direct(self, record, from_attr, to_attr):
""" Apply the ``direct`` mappings.
:param record: record to convert from a source to a target
:param from_attr: name of the source attribute or a callable
:type from_attr: callable | str
:param to_attr: name of the target attribute
:type to_attr: str
"""
if callable(from_attr):
return from_attr(self, record, to_attr)
value = record.get(from_attr)
if not value:
return False
# Backward compatibility: when a field is a relation, and a modifier is
# not used, we assume that the relation model is a binding.
# Use an explicit modifier backend_to_m2o in the 'direct' mappings to
# change that.
field = self.model._fields[to_attr]
if field.type == 'many2one':
mapping_func = backend_to_m2o(from_attr)
value = mapping_func(self, record, to_attr)
return value
class ExportMapper(Mapper):
""" :py:class:`Mapper` for exports.
Transform a record from OpenERP to a backend record
"""
_map_child_class = ExportMapChild
def _map_direct(self, record, from_attr, to_attr):
""" Apply the ``direct`` mappings.
:param record: record to convert from a source to a target
:param from_attr: name of the source attribute or a callable
:type from_attr: callable | str
:param to_attr: name of the target attribute
:type to_attr: str
"""
if callable(from_attr):
return from_attr(self, record, to_attr)
value = record[from_attr]
if not value:
return False
# Backward compatibility: when a field is a relation, and a modifier is
# not used, we assume that the relation model is a binding.
# Use an explicit modifier m2o_to_backend in the 'direct' mappings to
# change that.
field = self.model._fields[from_attr]
if field.type == 'many2one':
mapping_func = m2o_to_backend(from_attr)
value = mapping_func(self, record, to_attr)
return value
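# A hedged end-to-end sketch of a concrete mapper (the model and field
# names are hypothetical): direct mappings, a mapping method and a
# children mapping combined in a single ImportMapper.
#
#     class SaleOrderImportMapper(ImportMapper):
#         _model_name = 'magento.sale.order'
#         direct = [('name', 'name')]
#         children = [('items', 'order_line', 'magento.sale.order.line')]
#
#         @mapping
#         def state(self, record):
#             return {'state': 'draft'}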
class MapRecord(object):
""" A record prepared to be converted using a :py:class:`Mapper`.
MapRecord instances are prepared by :py:meth:`Mapper.map_record`.
Usage::
mapper = SomeMapper(env)
map_record = mapper.map_record(record)
output_values = map_record.values()
See :py:meth:`values` for more information on the available arguments.
"""
def __init__(self, mapper, source, parent=None):
self._source = source
self._mapper = mapper
self._parent = parent
self._forced_values = {}
@property
def source(self):
""" Source record to be converted """
return self._source
@property
def parent(self):
""" Parent record if the current record is an item """
return self._parent
def values(self, for_create=None, fields=None, **kwargs):
""" Build and returns the mapped values according to the options.
Usage::
mapper = SomeMapper(env)
map_record = mapper.map_record(record)
output_values = map_record.values()
Creation of records
            When using the option ``for_create``, the mappings decorated
            with ``@only_create`` will be applied in addition to the
            other mappings.
::
output_values = map_record.values(for_create=True)
Filter on fields
When using the ``fields`` argument, the mappings will be
            filtered using either the source key of the ``direct``
            mappings or the ``changed_by`` arguments of the mapping
            methods.
::
output_values = map_record.values(fields=['name', 'street'])
Custom options
Arbitrary key and values can be defined in the ``kwargs``
arguments. They can later be used in the mapping methods
using ``self.options``.
::
output_values = map_record.values(tax_include=True)
        :param for_create: when True, the mappings decorated with
                           ``@only_create`` are applied as well
:type for_create: boolean
:param fields: filter on fields
:type fields: list
:param **kwargs: custom options, they can later be used in the
mapping methods
"""
options = MapOptions(for_create=for_create, fields=fields, **kwargs)
values = self._mapper._apply(self, options=options)
values.update(self._forced_values)
return values
def update(self, *args, **kwargs):
""" Force values to be applied after a mapping.
Usage::
mapper = SomeMapper(env)
map_record = mapper.map_record(record)
map_record.update(a=1)
output_values = map_record.values()
# output_values will at least contain {'a': 1}
        The values assigned with ``update()`` are always applied; they
        have a higher priority than the mapped values.
"""
self._forced_values.update(*args, **kwargs)
class MapOptions(dict):
""" Container for the options of mappings.
Options can be accessed using attributes of the instance. When an
option is accessed and does not exist, it returns None.
"""
def __getitem__(self, key):
try:
return super(MapOptions, self).__getitem__(key)
except KeyError:
return None
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
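# MapOptions usage sketch: unknown keys resolve to None instead of
# raising, which is what the mapping methods rely on when they check
# ``self.options.for_create`` or any custom flag passed to values().
#
#     options = MapOptions(for_create=True)
#     options.for_create   # True
#     options.tax_include  # None (missing key)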
| zhaohuaw/connector | connector/unit/mapper.py | Python | agpl-3.0 | 32,917 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-21 11:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('submission', '0011_auto_20170921_0937'),
('identifiers', '0003_brokendoi_journal'),
]
operations = [
migrations.RemoveField(
model_name='brokendoi',
name='journal',
),
migrations.AddField(
model_name='brokendoi',
name='article',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='submission.Article'),
preserve_default=False,
),
]
| BirkbeckCTP/janeway | src/identifiers/migrations/0004_auto_20170921_1113.py | Python | agpl-3.0 | 750 |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import json, web
from lib.log import Log
class Env(object):
@staticmethod
def get(key):
if key and key in web.ctx.env:
return web.ctx.env[key]
else:
return web.ctx.env
@staticmethod
def set(key, value):
web.ctx.env[key] = value
@staticmethod
def setFromFile(file):
        with open(file) as fenv:
            jenv = json.load(fenv)
for key,value in jenv.items():
web.ctx.env[key] = value | ZiTAL/zpy | private/lib/env.py | Python | agpl-3.0 | 439 |
from comics.aggregator.crawler import CrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Billy'
language = 'no'
url = 'http://www.billy.no/'
start_date = '1950-01-01'
active = False
rights = 'Mort Walker'
class Crawler(CrawlerBase):
def crawl(self, pub_date):
pass # Comic no longer published
| datagutten/comics | comics/comics/billy.py | Python | agpl-3.0 | 384 |
from setuptools import setup, find_packages
XMODULES = [
"abtest = xmodule.abtest_module:ABTestDescriptor",
"book = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"chapter = xmodule.seq_module:SequenceDescriptor",
"combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor",
"conditional = xmodule.conditional_module:ConditionalDescriptor",
"course = xmodule.course_module:CourseDescriptor",
"customtag = xmodule.template_module:CustomTagDescriptor",
"discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"html = xmodule.html_module:HtmlDescriptor",
"image = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"error = xmodule.error_module:ErrorDescriptor",
"peergrading = xmodule.peer_grading_module:PeerGradingDescriptor",
"poll_question = xmodule.poll_module:PollDescriptor",
"problem = xmodule.capa_module:CapaDescriptor",
"problemset = xmodule.seq_module:SequenceDescriptor",
"randomize = xmodule.randomize_module:RandomizeDescriptor",
"section = xmodule.backcompat_module:SemanticSectionDescriptor",
"sequential = xmodule.seq_module:SequenceDescriptor",
"slides = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"vertical = xmodule.vertical_module:VerticalDescriptor",
"video = xmodule.video_module:VideoDescriptor",
"videoalpha = xmodule.video_module:VideoDescriptor",
"videodev = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"videosequence = xmodule.seq_module:SequenceDescriptor",
"discussion = xmodule.discussion_module:DiscussionDescriptor",
"course_info = xmodule.html_module:CourseInfoDescriptor",
"static_tab = xmodule.html_module:StaticTabDescriptor",
"custom_tag_template = xmodule.raw_module:RawDescriptor",
"about = xmodule.html_module:AboutDescriptor",
"wrapper = xmodule.wrapper_module:WrapperDescriptor",
"graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor",
"annotatable = xmodule.annotatable_module:AnnotatableDescriptor",
"textannotation = xmodule.textannotation_module:TextAnnotationDescriptor",
"videoannotation = xmodule.videoannotation_module:VideoAnnotationDescriptor",
"foldit = xmodule.foldit_module:FolditDescriptor",
"word_cloud = xmodule.word_cloud_module:WordCloudDescriptor",
"hidden = xmodule.hidden_module:HiddenDescriptor",
"raw = xmodule.raw_module:RawDescriptor",
"crowdsource_hinter = xmodule.crowdsource_hinter:CrowdsourceHinterDescriptor",
"lti = xmodule.lti_module:LTIDescriptor",
]
setup(
name="XModule",
version="0.1",
packages=find_packages(exclude=["tests"]),
install_requires=[
'distribute',
'docopt',
'capa',
'path.py',
'webob',
],
package_data={
'xmodule': ['js/module/*'],
},
# See http://guide.python-distribute.org/creation.html#entry-points
# for a description of entry_points
entry_points={
'xblock.v1': XMODULES,
'xmodule.v1': XMODULES,
'console_scripts': [
'xmodule_assets = xmodule.static_content:main',
],
},
)
| pku9104038/edx-platform | common/lib/xmodule/setup.py | Python | agpl-3.0 | 3,169 |
#!/usr/bin/env python
#
# Freesound is (c) MUSIC TECHNOLOGY GROUP, UNIVERSITAT POMPEU FABRA
#
# Freesound is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Freesound is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# See AUTHORS file.
#
# 03/10/2013: Modified from original code
import sys
from compmusic.extractors.imagelib.MelSpectrogramImage import create_wave_images
from processing import AudioProcessingException
'''
parser = optparse.OptionParser("usage: %prog [options] input-filename", conflict_handler="resolve")
parser.add_option("-a", "--waveout", action="store", dest="output_filename_w", type="string", help="output waveform image (default input filename + _w.png)")
parser.add_option("-s", "--specout", action="store", dest="output_filename_s", type="string", help="output spectrogram image (default input filename + _s.jpg)")
parser.add_option("-w", "--width", action="store", dest="image_width", type="int", help="image width in pixels (default %default)")
parser.add_option("-h", "--height", action="store", dest="image_height", type="int", help="image height in pixels (default %default)")
parser.add_option("-f", "--fft", action="store", dest="fft_size", type="int", help="fft size, power of 2 for increased performance (default %default)")
parser.add_option("-p", "--profile", action="store_true", dest="profile", help="run profiler and output profiling information")
parser.set_defaults(output_filename_w=None, output_filename_s=None, image_width=500, image_height=171, fft_size=2048)
(options, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
parser.error("not enough arguments")
if len(args) > 1 and (options.output_filename_w != None or options.output_filename_s != None):
parser.error("when processing multiple files you can't define the output filename!")
'''
def progress_callback(percentage):
sys.stdout.write(str(percentage) + "% ")
sys.stdout.flush()
# process all files so the user can use wildcards like *.wav
def genimages(input_file, output_file_w, output_file_s, output_file_m, options):
args = (input_file, output_file_w, output_file_s, output_file_m, options.image_width, options.image_height,
options.fft_size, progress_callback, options.f_min, options.f_max, options.scale_exp, options.pallete)
print("processing file %s:\n\t" % input_file, end="")
try:
create_wave_images(*args)
except AudioProcessingException as e:
print("Error running wav2png: ", e)
| MTG/pycompmusic | compmusic/extractors/imagelib/wav2png.py | Python | agpl-3.0 | 3,050 |
# -*- coding: utf-8 -*-
#===============================================================================
# Custom res_users object
# Add an CAFAT ID for use in New Caledonia
#===============================================================================
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class res_users(orm.Model):
"""
Custom res_users object
Add a CAFAT ID for use in New Caledonia
It's for odoo user not partner
For partner you'll find the CAFAT ID in res.parner object
"""
_inherit = "res.users"
_columns = {
'cafat_id':fields.char('CAFAT ID', size = 16, help = 'CAFAT ID of the doctor = convention number. This is not the CAFAT Number as for a patient'),
}
| frouty/odoogoeen | extra-addons/oph/oph/custom/oph_res_users.py | Python | agpl-3.0 | 773 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import django_filters
from django.forms import TextInput
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from base.models.entity_version import EntityVersion
class EntityVersionFilter(django_filters.FilterSet):
acronym = django_filters.CharFilter(
lookup_expr='icontains', label=_("Acronym"),
widget=TextInput(attrs={'style': "text-transform:uppercase"})
)
title = django_filters.CharFilter(lookup_expr='icontains', label=_("Title"), )
class Meta:
model = EntityVersion
fields = ["entity_type"]
class EntityListSerializer(serializers.Serializer):
acronym = serializers.CharField()
title = serializers.CharField()
entity_type = serializers.CharField()
# Display human readable value
entity_type_text = serializers.CharField(source='get_entity_type_display', read_only=True)
organization = serializers.SerializerMethodField()
select_url = serializers.SerializerMethodField()
def get_organization(self, obj):
return str(obj.entity.organization)
def get_select_url(self, obj):
return reverse(
"entity_read",
kwargs={'entity_version_id': obj.id}
)
| uclouvain/osis | base/forms/entity.py | Python | agpl-3.0 | 2,540 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import numpy as np
import pytest
import mdtraj as md
from mdtraj.formats import HDF5TrajectoryFile, NetCDFTrajectoryFile
from mdtraj.reporters import HDF5Reporter, NetCDFReporter, DCDReporter
from mdtraj.testing import eq
try:
from simtk.unit import nanometers, kelvin, picoseconds, femtoseconds
from simtk.openmm import LangevinIntegrator, Platform
from simtk.openmm.app import PDBFile, ForceField, Simulation, CutoffNonPeriodic, CutoffPeriodic, HBonds
HAVE_OPENMM = True
except ImportError:
HAVE_OPENMM = False
# special pytest global to mark all tests in this module
pytestmark = pytest.mark.skipif(not HAVE_OPENMM, reason='test_reporter.py needs OpenMM.')
def test_reporter(tmpdir, get_fn):
pdb = PDBFile(get_fn('native.pdb'))
forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
# NO PERIODIC BOUNDARY CONDITIONS
system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
nonbondedCutoff=1.0 * nanometers, constraints=HBonds, rigidWater=True)
integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
integrator.setConstraintTolerance(0.00001)
platform = Platform.getPlatformByName('Reference')
simulation = Simulation(pdb.topology, system, integrator, platform)
simulation.context.setPositions(pdb.positions)
simulation.context.setVelocitiesToTemperature(300 * kelvin)
tmpdir = str(tmpdir)
hdf5file = os.path.join(tmpdir, 'traj.h5')
ncfile = os.path.join(tmpdir, 'traj.nc')
dcdfile = os.path.join(tmpdir, 'traj.dcd')
reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
velocities=True)
reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True, cell=True)
reporter3 = DCDReporter(dcdfile, 2)
simulation.reporters.append(reporter)
simulation.reporters.append(reporter2)
simulation.reporters.append(reporter3)
simulation.step(100)
reporter.close()
reporter2.close()
reporter3.close()
with HDF5TrajectoryFile(hdf5file) as f:
got = f.read()
eq(got.temperature.shape, (50,))
eq(got.potentialEnergy.shape, (50,))
eq(got.kineticEnergy.shape, (50,))
eq(got.coordinates.shape, (50, 22, 3))
eq(got.velocities.shape, (50, 22, 3))
eq(got.cell_lengths, None)
eq(got.cell_angles, None)
eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
assert f.topology == md.load(get_fn('native.pdb')).top
with NetCDFTrajectoryFile(ncfile) as f:
xyz, time, cell_lengths, cell_angles = f.read()
eq(cell_lengths, None)
eq(cell_angles, None)
eq(time, 0.002 * 2 * (1 + np.arange(50)))
hdf5_traj = md.load(hdf5file)
dcd_traj = md.load(dcdfile, top=get_fn('native.pdb'))
netcdf_traj = md.load(ncfile, top=get_fn('native.pdb'))
# we don't have to convert units here, because md.load already
# handles that
assert hdf5_traj.unitcell_vectors is None
eq(hdf5_traj.xyz, netcdf_traj.xyz)
eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
eq(hdf5_traj.time, netcdf_traj.time)
eq(dcd_traj.xyz, hdf5_traj.xyz)
# yield lambda: eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
def test_reporter_subset(tmpdir, get_fn):
pdb = PDBFile(get_fn('native2.pdb'))
pdb.topology.setUnitCellDimensions([2, 2, 2])
forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffPeriodic,
nonbondedCutoff=1 * nanometers, constraints=HBonds, rigidWater=True)
integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
integrator.setConstraintTolerance(0.00001)
platform = Platform.getPlatformByName('Reference')
simulation = Simulation(pdb.topology, system, integrator, platform)
simulation.context.setPositions(pdb.positions)
simulation.context.setVelocitiesToTemperature(300 * kelvin)
tmpdir = str(tmpdir)
hdf5file = os.path.join(tmpdir, 'traj.h5')
ncfile = os.path.join(tmpdir, 'traj.nc')
dcdfile = os.path.join(tmpdir, 'traj.dcd')
atomSubset = [0, 1, 2, 4, 5]
reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
velocities=True, atomSubset=atomSubset)
reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True,
cell=True, atomSubset=atomSubset)
reporter3 = DCDReporter(dcdfile, 2, atomSubset=atomSubset)
simulation.reporters.append(reporter)
simulation.reporters.append(reporter2)
simulation.reporters.append(reporter3)
simulation.step(100)
reporter.close()
reporter2.close()
reporter3.close()
t = md.load(get_fn('native.pdb'))
t.restrict_atoms(atomSubset)
with HDF5TrajectoryFile(hdf5file) as f:
got = f.read()
eq(got.temperature.shape, (50,))
eq(got.potentialEnergy.shape, (50,))
eq(got.kineticEnergy.shape, (50,))
eq(got.coordinates.shape, (50, len(atomSubset), 3))
eq(got.velocities.shape, (50, len(atomSubset), 3))
eq(got.cell_lengths, 2 * np.ones((50, 3)))
eq(got.cell_angles, 90 * np.ones((50, 3)))
eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
assert f.topology == md.load(get_fn('native.pdb'), atom_indices=atomSubset).topology
with NetCDFTrajectoryFile(ncfile) as f:
xyz, time, cell_lengths, cell_angles = f.read()
eq(cell_lengths, 20 * np.ones((50, 3)))
eq(cell_angles, 90 * np.ones((50, 3)))
eq(time, 0.002 * 2 * (1 + np.arange(50)))
eq(xyz.shape, (50, len(atomSubset), 3))
hdf5_traj = md.load(hdf5file)
dcd_traj = md.load(dcdfile, top=hdf5_traj)
netcdf_traj = md.load(ncfile, top=hdf5_traj)
# we don't have to convert units here, because md.load already handles that
eq(hdf5_traj.xyz, netcdf_traj.xyz)
eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
eq(hdf5_traj.time, netcdf_traj.time)
eq(dcd_traj.xyz, hdf5_traj.xyz)
eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
| leeping/mdtraj | tests/test_reporter.py | Python | lgpl-2.1 | 7,439 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
from spack.main import SpackCommand
versions = SpackCommand('versions')
def test_safe_only_versions():
"""Only test the safe versions of a package.
(Using the deprecated command line argument)
"""
versions('--safe-only', 'zlib')
def test_safe_versions():
"""Only test the safe versions of a package."""
versions('--safe', 'zlib')
@pytest.mark.network
def test_remote_versions():
"""Test a package for which remote versions should be available."""
versions('zlib')
@pytest.mark.network
def test_remote_versions_only():
"""Test a package for which remote versions should be available."""
versions('--remote', 'zlib')
@pytest.mark.network
@pytest.mark.usefixtures('mock_packages')
def test_new_versions_only():
"""Test a package for which new versions should be available."""
versions('--new', 'brillig')
@pytest.mark.network
def test_no_versions():
"""Test a package for which no remote versions are available."""
versions('converge')
@pytest.mark.network
def test_no_unchecksummed_versions():
"""Test a package for which no unchecksummed versions are available."""
versions('bzip2')
@pytest.mark.network
def test_versions_no_url():
"""Test a package with versions but without a ``url`` attribute."""
versions('graphviz')
@pytest.mark.network
def test_no_versions_no_url():
"""Test a package without versions or a ``url`` attribute."""
versions('opengl')
| iulian787/spack | lib/spack/spack/test/cmd/versions.py | Python | lgpl-2.1 | 1,677 |
############################################################################
#
# Copyright (C) 2015 The Qt Company Ltd.
# Contact: http://www.qt.io/licensing
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms and
# conditions see http://www.qt.io/terms-conditions. For further information
# use the contact form at http://www.qt.io/contact-us.
#
# GNU Lesser General Public License Usage
# Alternatively, this file may be used under the terms of the GNU Lesser
# General Public License version 2.1 or version 3 as published by the Free
# Software Foundation and appearing in the file LICENSE.LGPLv21 and
# LICENSE.LGPLv3 included in the packaging of this file. Please review the
# following information to ensure the GNU Lesser General Public License
# requirements will be met: https://www.gnu.org/licenses/lgpl.html and
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# In addition, as a special exception, The Qt Company gives you certain additional
# rights. These rights are described in The Qt Company LGPL Exception
# version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
#
#############################################################################
from dumper import *
def dumpLiteral(d, value):
d.putSimpleCharArray(value["_chars"], value["_size"])
def qdump__Core__Id(d, value):
try:
name = d.parseAndEvaluate("Core::nameForId(%d)" % value["m_id"])
d.putSimpleCharArray(name)
except:
d.putValue(value["m_id"])
d.putPlainChildren(value)
def qdump__Debugger__Internal__GdbMi(d, value):
str = d.encodeByteArray(value["m_name"]) + "3a20" \
+ d.encodeByteArray(value["m_data"])
d.putValue(str, Hex2EncodedLatin1)
d.putPlainChildren(value)
def qdump__Debugger__Internal__DisassemblerLine(d, value):
d.putByteArrayValue(value["m_data"])
d.putPlainChildren(value)
def qdump__Debugger__Internal__WatchData(d, value):
d.putByteArrayValue(value["iname"])
d.putPlainChildren(value)
def qdump__Debugger__Internal__WatchItem(d, value):
d.putByteArrayValue(value["d"]["iname"])
d.putPlainChildren(value)
def qdump__Debugger__Internal__BreakpointModelId(d, value):
d.putValue("%s.%s" % (int(value["m_majorPart"]), int(value["m_minorPart"])))
d.putPlainChildren(value)
def qdump__Debugger__Internal__ThreadId(d, value):
d.putValue("%s" % value["m_id"])
d.putPlainChildren(value)
def qdump__CPlusPlus__ByteArrayRef(d, value):
d.putSimpleCharArray(value["m_start"], value["m_length"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Identifier(d, value):
d.putSimpleCharArray(value["_chars"], value["_size"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Symbol(d, value):
name = d.downcast(value["_name"])
dumpLiteral(d, name)
d.putBetterType(value.type)
d.putPlainChildren(value)
def qdump__CPlusPlus__IntegerType(d, value):
d.putValue(value["_kind"])
d.putPlainChildren(value)
def qdump__CPlusPlus__NamedType(d, value):
literal = d.downcast(value["_name"])
dumpLiteral(d, literal)
d.putBetterType(value.type)
d.putPlainChildren(value)
def qdump__CPlusPlus__TemplateNameId(d, value):
dumpLiteral(d, value["_identifier"].dereference())
d.putBetterType(value.type)
d.putPlainChildren(value)
def qdump__CPlusPlus__Literal(d, value):
dumpLiteral(d, value)
d.putPlainChildren(value)
def qdump__CPlusPlus__StringLiteral(d, value):
d.putSimpleCharArray(value["_chars"], value["_size"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Internal__Value(d, value):
d.putValue(value["l"])
d.putPlainChildren(value)
def qdump__Utils__FileName(d, value):
d.putStringValue(value)
d.putPlainChildren(value)
def qdump__Utils__ElfSection(d, value):
d.putByteArrayValue(value["name"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Token(d, value):
k = value["f"]["kind"]
if int(k) == 6:
d.putValue("T_IDENTIFIER. offset: %d, len: %d"
% (value["utf16charOffset"], value["f"]["utf16chars"]))
elif int(k) == 7:
d.putValue("T_NUMERIC_LITERAL. offset: %d, len: %d"
% (value["utf16charOffset"], value["f"]["utf16chars"]))
else:
val = str(k.cast(d.lookupType("CPlusPlus::Kind")))
d.putValue(val[11:]) # Strip "CPlusPlus::"
d.putPlainChildren(value)
def qdump__CPlusPlus__Internal__PPToken(d, value):
data, size, alloc = d.byteArrayData(value["m_src"])
length = int(value["f"]["utf16chars"])
offset = int(value["utf16charOffset"])
#warn("size: %s, alloc: %s, offset: %s, length: %s, data: %s"
# % (size, alloc, offset, length, data))
d.putValue(d.readMemory(data + offset, min(100, length)),
Hex2EncodedLatin1)
d.putPlainChildren(value)
| kuba1/qtcreator | share/qtcreator/debugger/creatortypes.py | Python | lgpl-2.1 | 5,087 |
#!/usr/bin/env python
from basetest import BaseTest
import sys, tempfile, os, time
import unittest
import data
sys.path.insert(0, '..')
from zeroinstall.injector import model, gpg, trust
from zeroinstall.injector.namespaces import config_site
from zeroinstall.injector.iface_cache import PendingFeed
from zeroinstall.support import basedir
class TestIfaceCache(BaseTest):
def testList(self):
iface_cache = self.config.iface_cache
self.assertEquals([], iface_cache.list_all_interfaces())
iface_dir = basedir.save_cache_path(config_site, 'interfaces')
file(os.path.join(iface_dir, 'http%3a%2f%2ffoo'), 'w').close()
self.assertEquals(['http://foo'],
iface_cache.list_all_interfaces())
# TODO: test overrides
def testCheckSigned(self):
iface_cache = self.config.iface_cache
trust.trust_db.trust_key(
'92429807C9853C0744A68B9AAE07828059A53CC1')
feed_url = 'http://foo'
src = tempfile.TemporaryFile()
# Unsigned
src.write("hello")
src.flush()
src.seek(0)
try:
PendingFeed(feed_url, src)
assert 0
except model.SafeException:
pass
stream = tempfile.TemporaryFile()
stream.write(data.thomas_key)
stream.seek(0)
gpg.import_key(stream)
# Signed
src.seek(0)
src.write(data.foo_signed_xml)
src.flush()
src.seek(0)
pending = PendingFeed(feed_url, src)
assert iface_cache.update_feed_if_trusted(feed_url, pending.sigs, pending.new_xml)
self.assertEquals(['http://foo'],
iface_cache.list_all_interfaces())
feed = iface_cache.get_feed(feed_url)
self.assertEquals(1154850229, feed.last_modified)
def testXMLupdate(self):
iface_cache = self.config.iface_cache
trust.trust_db.trust_key(
'92429807C9853C0744A68B9AAE07828059A53CC1')
stream = tempfile.TemporaryFile()
stream.write(data.thomas_key)
stream.seek(0)
gpg.import_key(stream)
iface = iface_cache.get_interface('http://foo')
src = tempfile.TemporaryFile()
src.write(data.foo_signed_xml)
src.seek(0)
pending = PendingFeed(iface.uri, src)
assert iface_cache.update_feed_if_trusted(iface.uri, pending.sigs, pending.new_xml)
iface_cache.__init__()
feed = iface_cache.get_feed('http://foo')
assert feed.last_modified == 1154850229
# mtimes are unreliable because copying often changes them -
# check that we extract the time from the signature when upgrading
upstream_dir = basedir.save_cache_path(config_site, 'interfaces')
cached = os.path.join(upstream_dir, model.escape(feed.url))
os.utime(cached, None)
iface_cache.__init__()
feed = iface_cache.get_feed('http://foo')
assert feed.last_modified > 1154850229
src = tempfile.TemporaryFile()
src.write(data.new_foo_signed_xml)
src.seek(0)
pending = PendingFeed(feed.url, src)
assert iface_cache.update_feed_if_trusted(feed.url, pending.sigs, pending.new_xml)
# Can't 'update' to an older copy
src = tempfile.TemporaryFile()
src.write(data.foo_signed_xml)
src.seek(0)
try:
pending = PendingFeed(feed.url, src)
assert iface_cache.update_feed_if_trusted(feed.url, pending.sigs, pending.new_xml)
assert 0
except model.SafeException:
pass
def testTimes(self):
iface_cache = self.config.iface_cache
stream = tempfile.TemporaryFile()
stream.write(data.thomas_key)
stream.seek(0)
gpg.import_key(stream)
upstream_dir = basedir.save_cache_path(config_site, 'interfaces')
cached = os.path.join(upstream_dir, model.escape('http://foo'))
stream = file(cached, 'w')
stream.write(data.foo_signed_xml)
stream.close()
signed = iface_cache._get_signature_date('http://foo')
assert signed == None
trust.trust_db.trust_key(
'92429807C9853C0744A68B9AAE07828059A53CC1')
signed = iface_cache._get_signature_date('http://foo')
assert signed == 1154850229
stream = file(cached, 'w+')
stream.seek(0)
stream.write('Hello')
stream.close()
# When the signature is invalid, we just return None.
		# This is because versions < 0.22 used to corrupt the signature
# by adding an attribute to the XML
signed = iface_cache._get_signature_date('http://foo')
assert signed == None
def testCheckAttempt(self):
iface_cache = self.config.iface_cache
self.assertEquals(None, iface_cache.get_last_check_attempt("http://foo/bar.xml"))
start_time = time.time() - 5 # Seems to be some odd rounding here
iface_cache.mark_as_checking("http://foo/bar.xml")
last_check = iface_cache.get_last_check_attempt("http://foo/bar.xml")
assert last_check is not None
assert last_check >= start_time, (last_check, start_time)
self.assertEquals(None, iface_cache.get_last_check_attempt("http://foo/bar2.xml"))
if __name__ == '__main__':
unittest.main()
| pombredanne/zero-install | tests/testifacecache.py | Python | lgpl-2.1 | 4,642 |
#!PYRTIST:VERSION:0:0:1
from pyrtist.lib2d import Point, Tri
#!PYRTIST:REFPOINTS:BEGIN
bbox1 = Point(0.0, 50.0); bbox2 = Point(100.0, 12.5838926174)
p1 = Point(3.15540458874, 46.942241204)
p2 = Point(3.23537580547, 42.1395946309)
p4 = Point(28.5119375629, 38.1285583893)
q1 = Point(73.1545885714, 21.8120805369)
q3 = Point(93.6244457143, 38.4228187919)
q5 = Point(66.4133738602, 33.8755592617)
q6 = Point(94.2249240122, 24.9089847651)
q7 = Point(84.8024316109, 26.7326948322)
q2 = Tri(q5, Point(70.1344457143, 37.2483221477))
q4 = Tri(q6, Point(90.4365171429, 20.1342281879), q7)
#!PYRTIST:REFPOINTS:END
# TUTORIAL EXAMPLE N.3
# How to create a closed figure using bezier curves.
from pyrtist.lib2d import *
w = Window()
s = " "
font = Font(2, "Helvetica")
w << Args(
Text(p1, font, Offset(0, 1), Color.red,
"Pyrtist allows creating curves (cubic Bezier splines)."
"\nThis example explains how."),
Text(p2, font, Offset(0, 1),
"STEP 1: launch Pyrtist or create a new document (CTRL+N)\n",
"STEP 2: click on the button to create a new curved polygon\n",
"STEP 3: move the mouse where you want to create the first vertex.\n",
s, "Click on the left button of the mouse\n",
"STEP 4: repeat step 3 to create other 3 vertices. You should see\n",
s, "a black polygon with straight boundaries\n",
"STEP 5: move the mouse over one of the vertices. Press the CTRL\n",
s, "key and the left mouse button, simultaneously. Keep them\n",
s, "pressed while moving the mouse out of the vertex. A round\n",
s, "reference point appears and the polygon edge is rounded.\n",
"STEP 6: you can repeat step 5 for the same vertex or for other\n",
s, "vertices. You should obtain something similar to what shown on\n",
s, "the left")
)
# The line below is what draws the curved polygon.
w << Curve(q1, q2, q3, q4)
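# q2 and q4 are Tri reference points: the extra Point arguments act as
# the control handles that round the corresponding corners (the round
# points created in STEP 5 above).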
w << Image("curve.png", p4, 2.5)
w << BBox(bbox1, bbox2)
gui(w)
| mfnch/pyrtist | pyrtist/examples/tutorial03-curved_polys.py | Python | lgpl-2.1 | 1,984 |
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2009 by the FIFE team
# http://www.fifengine.de
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
"""
Pychan extension widgets.
Extension widgets are partly experimental, partly rarely used widgets
which are added here. They are by default not included in the widgets
registry and thus cannot be loaded from XML files. Use L{pychan.widgets.registerWidget}
to enable that.
Less care will be taken to keep their API stable, and before they are
added to (or replace) the standard widgets they will have to be
reviewed in detail.
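A minimal usage sketch (``SomeExtWidget`` stands for any widget class
defined in this package; the import path is assumed from the package
location)::
  from fife.extensions.pychan import widgets
  from fife.extensions.pychan.widgets.ext import SomeExtWidget
  widgets.registerWidget(SomeExtWidget)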
"""
| mgeorgehansen/FIFE_Technomage | engine/python/fife/extensions/pychan/widgets/ext/__init__.py | Python | lgpl-2.1 | 1,492 |
#!/usr/bin/python
import sys
import requests
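# Usage sketch: pass the host (without the http:// scheme) as the first
# argument, e.g. ``./check_url.py example.com``; exit code 0 maps to
# Nagios OK and 2 to CRITICAL.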
try:
url = sys.argv[1]
r = requests.get('http://%s' %url ,timeout=3)
except requests.exceptions.Timeout:
print 'url timeout\n%s' %url
sys.exit(2)
except:
print 'url error \n%s' %url
sys.exit(2)
url_status = r.status_code
if url_status == 200:
print 'url_status %s\n%s' %(url_status,url)
sys.exit(0)
else:
print 'url_status %s\n%s' %(url_status,url)
sys.exit(2) | XiaJieCom/change | document/Service/nagios/nrpe/check_url.py | Python | lgpl-2.1 | 489 |
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import shutil
from cerbero.config import Platform
from cerbero.utils import shell
# Clean-up LD environment to avoid library version mismatches while running
# the system subversion
CLEAN_ENV = os.environ.copy()
if 'LD_LIBRARY_PATH' in CLEAN_ENV:
CLEAN_ENV.pop('LD_LIBRARY_PATH')
GIT = 'git'
def init(git_dir):
'''
Initialize a git repository with 'git init'
@param git_dir: path of the git repository
@type git_dir: str
'''
shell.call('mkdir -p %s' % git_dir)
shell.call('%s init' % GIT, git_dir, env=CLEAN_ENV)
def clean(git_dir):
'''
    Clean a git repository with clean -dfx
@param git_dir: path of the git repository
@type git_dir: str
'''
return shell.call('%s clean -dfx' % GIT, git_dir, env=CLEAN_ENV)
def list_tags(git_dir, fail=True):
'''
List all tags
@param git_dir: path of the git repository
@type git_dir: str
@param fail: raise an error if the command failed
@type fail: false
@return: list of tag names (str)
@rtype: list
'''
tags = shell.check_call('%s tag -l' % GIT, git_dir, fail=fail, env=CLEAN_ENV)
tags = tags.strip()
if tags:
tags = tags.split('\n')
return tags
def create_tag(git_dir, tagname, tagdescription, commit, fail=True):
'''
Create a tag using commit
@param git_dir: path of the git repository
@type git_dir: str
@param tagname: name of the tag to create
@type tagname: str
@param tagdescription: the tag description
@type tagdescription: str
@param commit: the tag commit to use
@type commit: str
@param fail: raise an error if the command failed
@type fail: false
'''
shell.call('%s tag -s %s -m "%s" %s' %
(GIT, tagname, tagdescription, commit), git_dir, fail=fail,
env=CLEAN_ENV)
return shell.call('%s push origin %s' % (GIT, tagname), git_dir, fail=fail,
env=CLEAN_ENV)
def delete_tag(git_dir, tagname, fail=True):
'''
Delete a tag
@param git_dir: path of the git repository
@type git_dir: str
@param tagname: name of the tag to delete
@type tagname: str
@param fail: raise an error if the command failed
@type fail: false
'''
return shell.call('%s tag -d %s' % (GIT, tagname), git_dir, fail=fail,
env=CLEAN_ENV)
def fetch(git_dir, fail=True):
'''
Fetch all refs from all the remotes
@param git_dir: path of the git repository
@type git_dir: str
@param fail: raise an error if the command failed
    @type fail: bool
'''
return shell.call('%s fetch --all' % GIT, git_dir, fail=fail, env=CLEAN_ENV)
def submodules_update(git_dir, src_dir=None, fail=True):
'''
    Update submodules from a local directory
@param git_dir: path of the git repository
@type git_dir: str
@param src_dir: path or base URI of the source directory
    @type src_dir: str
@param fail: raise an error if the command failed
    @type fail: bool
'''
if src_dir:
config = shell.check_call('%s config --file=.gitmodules --list' % GIT,
git_dir)
config_array = [s.split('=', 1) for s in config.split('\n')]
for c in config_array:
if c[0].startswith('submodule.') and c[0].endswith('.path'):
submodule = c[0][len('submodule.'):-len('.path')]
shell.call("%s config --file=.gitmodules submodule.%s.url %s" %
(GIT, submodule, os.path.join(src_dir, c[1])),
git_dir)
shell.call("%s submodule init" % GIT, git_dir)
shell.call("%s submodule sync" % GIT, git_dir)
shell.call("%s submodule update" % GIT, git_dir, fail=fail)
if src_dir:
for c in config_array:
if c[0].startswith('submodule.') and c[0].endswith('.url'):
shell.call("%s config --file=.gitmodules %s %s" %
(GIT, c[0], c[1]), git_dir)
shell.call("%s submodule sync" % GIT, git_dir)
def checkout(git_dir, commit):
'''
Reset a git repository to a given commit
@param git_dir: path of the git repository
@type git_dir: str
@param commit: the commit to checkout
@type commit: str
'''
return shell.call('%s reset --hard %s' % (GIT, commit), git_dir,
env=CLEAN_ENV)
def get_hash(git_dir, commit):
'''
Get a commit hash from a valid commit.
Can be used to check if a commit exists
@param git_dir: path of the git repository
@type git_dir: str
@param commit: the commit to log
@type commit: str
'''
return shell.check_call('%s show -s --pretty=%%H %s' %
(GIT, commit), git_dir, env=CLEAN_ENV)
def local_checkout(git_dir, local_git_dir, commit):
'''
Clone a repository for a given commit in a different location
@param git_dir: destination path of the git repository
@type git_dir: str
@param local_git_dir: path of the source git repository
@type local_git_dir: str
@param commit: the commit to checkout
    @type commit: str
'''
    # reset to a commit in case it's the first checkout and the master branch
    # is missing
branch_name = 'cerbero_build'
shell.call('%s reset --hard %s' % (GIT, commit), local_git_dir,
env=CLEAN_ENV)
shell.call('%s branch %s' % (GIT, branch_name), local_git_dir, fail=False,
env=CLEAN_ENV)
shell.call('%s checkout %s' % (GIT, branch_name), local_git_dir,
env=CLEAN_ENV)
shell.call('%s reset --hard %s' % (GIT, commit), local_git_dir,
env=CLEAN_ENV)
shell.call('%s clone %s -s -b %s .' % (GIT, local_git_dir,
branch_name),
git_dir, env=CLEAN_ENV)
submodules_update(git_dir, local_git_dir)
def add_remote(git_dir, name, url):
'''
Add a remote to a git repository
@param git_dir: destination path of the git repository
@type git_dir: str
@param name: name of the remote
@type name: str
@param url: url of the remote
@type url: str
'''
try:
shell.call('%s remote add %s %s' % (GIT, name, url), git_dir,
env=CLEAN_ENV)
except:
shell.call('%s remote set-url %s %s' % (GIT, name, url), git_dir,
env=CLEAN_ENV)
def check_line_endings(platform):
'''
    Checks that on Windows the automatic line-endings conversion is not used,
    as it breaks everything
    @param platform: the host platform
    @type platform: L{cerbero.config.Platform}
    @return: True if the git config option core.autocrlf is false
@rtype: bool
'''
if platform != Platform.WINDOWS:
return True
val = shell.check_call('%s config --get core.autocrlf' % GIT, env=CLEAN_ENV)
if ('false' in val.lower()):
return True
return False
def init_directory(git_dir):
'''
Initialize a git repository with the contents
of a directory
@param git_dir: path of the git repository
@type git_dir: str
'''
init(git_dir)
try:
shell.call('%s add --force -A .' % GIT, git_dir, env=CLEAN_ENV)
shell.call('%s commit -m "Initial commit" > /dev/null 2>&1' % GIT,
git_dir, env=CLEAN_ENV)
except:
pass
def apply_patch(patch, git_dir):
'''
    Applies a commit patch using 'git am'
@param git_dir: path of the git repository
@type git_dir: str
@param patch: path of the patch file
@type patch: str
'''
shell.call('%s am --ignore-whitespace %s' % (GIT, patch), git_dir,
env=CLEAN_ENV)
| sdroege/cerbero | cerbero/utils/git.py | Python | lgpl-2.1 | 8,593 |
#!/usr/bin/python
"""Test of table output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("End"))
sequence.append(KeyComboAction("Up"))
sequence.append(KeyComboAction("<Shift>Right"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Return"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"1. Table Where Am I",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 3 bottles of coke'",
" VISIBLE: '3 bottles of coke', cursor=1",
"SPEECH OUTPUT: 'table.'",
"SPEECH OUTPUT: 'Number.'",
"SPEECH OUTPUT: 'table cell.'",
"SPEECH OUTPUT: '3.'",
"SPEECH OUTPUT: 'column 1 of 3'",
"SPEECH OUTPUT: 'row 1 of 5.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Next row",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles'",
" VISIBLE: '5 packages of noodles', cursor=1",
"SPEECH OUTPUT: '5 packages of noodles.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"3. Table Where Am I (again)",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles'",
" VISIBLE: '5 packages of noodles', cursor=1",
"SPEECH OUTPUT: 'table.'",
"SPEECH OUTPUT: 'Number.'",
"SPEECH OUTPUT: 'table cell.'",
"SPEECH OUTPUT: '5.'",
"SPEECH OUTPUT: 'column 1 of 3'",
"SPEECH OUTPUT: 'row 2 of 5.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("F11"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.AssertPresentationAction(
"4. Turn row reading off",
["BRAILLE LINE: 'Speak cell'",
" VISIBLE: 'Speak cell', cursor=0",
"SPEECH OUTPUT: 'Speak cell'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"5. Table Right to the Product column in the packages of noodles row",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Number column header 5 packages of noodles'",
" VISIBLE: '5 packages of noodles', cursor=1",
"BRAILLE LINE: 'gtk-demo application Shopping list frame table Product column header packages of noodles table cell'",
" VISIBLE: 'packages of noodles table cell', cursor=1",
"SPEECH OUTPUT: 'Product column header packages of noodles.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"6. Table up to bottles of coke",
["BRAILLE LINE: 'gtk-demo application Shopping list frame table Product column header bottles of coke table cell'",
" VISIBLE: 'bottles of coke table cell', cursor=1",
"SPEECH OUTPUT: 'bottles of coke.'"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| chrys87/orca-beep | test/keystrokes/gtk-demo/role_table.py | Python | lgpl-2.1 | 3,344 |
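A sketch (not part of the recorded test above) of how one more assertion step could be appended before sequence.start(), following the same Macaroon/utils pattern; the key press and the expected speech string are placeholders.

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "7. Example extra step",
    ["SPEECH OUTPUT: 'expected phrase.'"]))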
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2019 Sergey Astanin
# https://bitbucket.org/astanin/python-tabulate
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from platform import python_version_tuple
import re
import math
if tuple(map(int, python_version_tuple()[:2])) >= (3, 3):
from collections.abc import Iterable
else:
from collections import Iterable
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = long # noqa
_float_type = float
_text_type = unicode # noqa
_binary_type = str
def _is_file(f):
return isinstance(f, file) # noqa
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
basestring = str
import io
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.8.4"
# minimum extra space in headers
MIN_PADDING = 2
# Whether or not to preserve leading/trailing whitespace in data.
PRESERVE_WHITESPACE = False
_DEFAULT_FLOATFMT = "g"
_DEFAULT_MISSINGVAL = ""
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column
# headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w)
for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _textile_row_with_attrs(cell_values, colwidths, colaligns):
cell_values[0] += ' '
alignment = {"left": "<.", "right": ">.", "center": "=.", "decimal": ">."}
values = (alignment.get(a, '') + v for a, v in zip(colaligns, cell_values))
return '|' + '|'.join(values) + '|'
def _html_begin_table_without_header(colwidths_ignore, colaligns_ignore):
# this table header will be suppressed if there is a header row
return "\n".join(["<table>", "<tbody>"])
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"'}
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(
celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
rowhtml = "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
if celltag == "th": # it's a header row, create a new table header
rowhtml = "\n".join(["<table>",
"<thead>",
rowhtml,
"</thead>",
"<tbody>"])
return rowhtml
def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns,
header=''):
alignment = {"left": '',
"right": '<style="text-align: right;">',
"center": '<style="text-align: center;">',
"decimal": '<style="text-align: right;">'}
values_with_attrs = ["{0}{1} {2} ".format(celltag,
alignment.get(a, ''),
header+c+header)
for c, a in zip(cell_values, colaligns)]
return "".join(values_with_attrs)+"||"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns, escrules=LATEX_ESCAPE_RULES):
def escape_char(c):
return escrules.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
def _rst_escape_first_column(rows, headers):
def escape_empty(val):
if isinstance(val, (_text_type, _binary_type)) and not val.strip():
return ".."
else:
return val
new_headers = list(headers)
new_rows = []
if headers:
new_headers[0] = escape_empty(headers[0])
for row in rows:
new_row = list(row)
if new_row:
new_row[0] = escape_empty(row[0])
new_rows.append(new_row)
return new_rows, new_headers
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
                  TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
                              linebelowheader=Line("╞", "═", "╪", "╡"),
                              linebetweenrows=Line("├", "─", "┼", "┤"),
                              linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"github":
TableFormat(lineabove=Line("|", "-", "|", "|"),
linebelowheader=Line("|", "-", "|", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"jira":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("||", "||", "||"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"presto":
TableFormat(lineabove=None,
linebelowheader=Line("", "-", "+", ""),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", ""),
datarow=DataRow("", "|", ""),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line(
"{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(
_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"moinmoin":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=partial(_moin_row_with_attrs, "||",
header="'''"),
datarow=partial(_moin_row_with_attrs, "||"),
padding=1, with_header_hide=None),
"youtrack":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|| ", " || ", " || "),
datarow=DataRow("| ", " | ", " |"),
padding=1, with_header_hide=None),
"html":
TableFormat(lineabove=_html_begin_table_without_header,
linebelowheader="",
linebetweenrows=None,
linebelow=Line("</tbody>\n</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=["lineabove"]),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "",
""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_raw":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "",
""),
headerrow=partial(_latex_row, escrules={}),
datarow=partial(_latex_row, escrules={}),
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular,
booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}",
"", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None),
"textile":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("|_. ", "|_.", "|"),
datarow=_textile_row_with_attrs,
padding=1, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
# The table formats for which multiline cells will be folded into subsequent
# table rows. The key is the original format specified at the API. The value is
# the format that will be used to represent the original format.
multiline_formats = {
"plain": "plain",
"simple": "simple",
"grid": "grid",
"fancy_grid": "fancy_grid",
"pipe": "pipe",
"orgtbl": "orgtbl",
"jira": "jira",
"presto": "presto",
"psql": "psql",
"rst": "rst",
}
# TODO: Add multiline support for the remaining table formats:
# - mediawiki: Replace \n with <br>
# - moinmoin: TBD
# - youtrack: TBD
# - html: Replace \n with <br>
# - latex*: Use "makecell" package: In header, replace X\nY with
# \thead{X\\Y} and in data row, replace X\nY with \makecell{X\\Y}
# - tsv: TBD
# - textile: Replace \n with <br/> (must be well-formed XML)
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
# ANSI color codes
_invisible_codes = re.compile(r"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m")
# ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m")
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
""" # noqa
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
>>> _isnumber("123e45678")
False
>>> _isnumber("inf")
True
"""
if not _isconvertible(float, string):
return False
elif isinstance(string, (_text_type, _binary_type)) and (
math.isinf(float(string)) or math.isnan(float(string))):
return string.lower() in ['inf', '-inf', 'nan']
return True
def _isint(string, inttype=int):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return (type(string) is inttype or
(isinstance(string, _binary_type) or
isinstance(string, _text_type))
and _isconvertible(inttype, string))
def _isbool(string):
"""
>>> _isbool(True)
True
>>> _isbool("False")
True
>>> _isbool(1)
False
"""
return (type(string) is _bool_type or
(isinstance(string, (_binary_type, _text_type))
and string in ("True", "False")))
def _type(string, has_invisible=True, numparse=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isbool(string):
return _bool_type
elif _isint(string) and numparse:
return int
elif _isint(string, _long_type) and numparse:
return int
elif _isnumber(string) and numparse:
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
fmt = "{0:>%ds}" % width
return fmt.format(s)
def _padright(width, s):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
""" # noqa
fmt = "{0:<%ds}" % width
return fmt.format(s)
def _padboth(width, s):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
fmt = "{0:^%ds}" % width
return fmt.format(s)
def _padnone(ignore_width, s):
return s
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
# optional wide-character support
if wcwidth is not None and WIDE_CHARS_MODE:
len_fn = wcwidth.wcswidth
else:
len_fn = len
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return len_fn(_strip_invisible(s))
else:
return len_fn(_text_type(s))
def _is_multiline(s):
if isinstance(s, _text_type):
return bool(re.search(_multiline_codes, s))
else: # a bytestring
return bool(re.search(_multiline_codes_bytes, s))
def _multiline_width(multiline_s, line_width_fn=len):
"""Visible width of a potentially multiline content."""
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
"""Return a function to calculate visible cell width."""
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
width_fn = lambda s: _multiline_width(s, line_width_fn) # noqa
else:
width_fn = line_width_fn
return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
if alignment == "right":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
if has_invisible:
decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
else:
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
padfn = _padnone
else:
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padright
return strings, padfn
def _align_column(strings, alignment, minwidth=0,
has_invisible=True, enable_widechars=False,
is_multiline=False):
"""[string] -> [padded_string]"""
strings, padfn = _align_column_choose_padfn(strings, alignment,
has_invisible)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
s_widths = list(map(width_fn, strings))
maxwidth = max(max(s_widths), minwidth)
# TODO: refactor column alignment in single-line and multiline modes
if is_multiline:
if not enable_widechars and not has_invisible:
padded_strings = [
"\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
for ms in strings]
else:
# enable wide-character width corrections
s_lens = [max((len(s) for s in re.split("[\r\n]", ms)))
for ms in strings]
visible_widths = [maxwidth - (w - l)
for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = ["\n".join([padfn(w, s)
for s in (ms.splitlines() or ms)])
for ms, w in zip(strings, visible_widths)]
else: # single-line cell values
if not enable_widechars and not has_invisible:
padded_strings = [padfn(maxwidth, s) for s in strings]
else:
# enable wide-character width corrections
s_lens = list(map(len, strings))
visible_widths = [maxwidth - (w - l)
for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = [padfn(w, s)
for s, w in zip(strings, visible_widths)]
return padded_strings
def _more_generic(type1, type2):
types = {_none_type: 0,
_bool_type: 1,
int: 2,
float: 3,
_binary_type: 4,
_text_type: 5}
invtypes = {5: _text_type,
4: _binary_type,
3: float,
2: int,
1: _bool_type,
0: _none_type}
moregeneric = max(types.get(type1, 5), types.get(type2, 5))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True, numparse=True):
"""The least generic type all column values are convertible to.
>>> _column_type([True, False]) is _bool_type
True
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible, numparse)
for s in strings]
return reduce(_more_generic, types, _bool_type)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
"""Format a value according to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
""" # noqa
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
is_a_colored_number = (has_invisible and
isinstance(val, (_text_type, _binary_type)))
if is_a_colored_number:
raw_val = _strip_invisible(val)
formatted_val = format(float(raw_val), floatfmt)
return val.replace(raw_val, formatted_val)
else:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, is_multiline=False,
width_fn=None):
"Pad string header to width chars given known visible_width of the header."
if is_multiline:
header_lines = re.split(_multiline_codes, header)
padded_lines = [_align_header(h, alignment, width, width_fn(h))
for h in header_lines]
return "\n".join(padded_lines)
# else: not multiline
ninvisible = len(header) - visible_width
width += ninvisible
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v]+list(row) for v, row in zip(index, rows)]
return rows
def _bool(val):
"A wrapper around standard bool() which doesn't throw on NumPy arrays"
try:
return bool(val)
except ValueError: # val is likely to be a numpy array with many elements
return False
def _normalize_tabular_data(tabular_data, headers, showindex="default"):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
If showindex="default", show row indices of the pandas.DataFrame.
If showindex="always", show row indices for all types of data.
If showindex="never", don't show row indices for all types of data.
If showindex is an iterable, show its values as row indices.
"""
try:
bool(headers)
except ValueError: # numpy.ndarray, pandas.core.index.Index, ...
headers = list(headers)
index = None
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
# columns have to be transposed
rows = list(izip_longest(*tabular_data.values()))
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a
# pandas.DataFrame (pandas 0.11.0)
keys = list(tabular_data)
if tabular_data.index.name is not None:
if isinstance(tabular_data.index.name, list):
keys[:0] = tabular_data.index.name
else:
keys[:0] = [tabular_data.index.name]
# values matrix doesn't need to be transposed
vals = tabular_data.values
# for DataFrames add an index per default
index = list(tabular_data.index)
rows = [list(row) for row in vals]
else:
raise ValueError(
"tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type, keys)) # headers should be strings
    else:  # it's a usual iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and not rows):
# an empty table (issue #81)
headers = []
elif (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
# Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError(
'headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif (headers == "keys"
and hasattr(tabular_data, "description")
and hasattr(tabular_data, "fetchone")
and hasattr(tabular_data, "rowcount")):
# Python Database API cursor object (PEP 0249)
# print tabulate(cursor, headers='keys')
headers = [column[0] for column in tabular_data.description]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
if index is not None:
headers = [index[0]] + list(rows[0])
index = index[1:]
else:
headers = rows[0]
headers = list(map(_text_type, headers)) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type, headers))
rows = list(map(list, rows))
# add or remove an index column
showindex_is_a_str = type(showindex) in [_text_type, _binary_type]
if showindex == "default" and index is not None:
rows = _prepend_row_index(rows, index)
elif isinstance(showindex, Iterable) and not showindex_is_a_str:
rows = _prepend_row_index(rows, list(showindex))
elif showindex == "always" or (_bool(showindex) and
not showindex_is_a_str):
if index is None:
index = list(range(len(rows)))
rows = _prepend_row_index(rows, index)
elif showindex == "never" or (not _bool(showindex) and
not showindex_is_a_str):
pass
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt=_DEFAULT_FLOATFMT, numalign="decimal", stralign="left",
missingval=_DEFAULT_MISSINGVAL, showindex="default",
disable_numparse=False, colalign=None):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
By default, pandas.DataFrame data have an additional column called
row index. To add a similar column to all other types of data,
use `showindex="always"` or `showindex=True`. To suppress row indices
    for all types of data, pass `showindex="never"` or `showindex=False`.
To add a custom row index column, pass `showindex=some_iterable`.
>>> print(tabulate([["F",24],["M",19]], showindex="always"))
- - --
0 F 24
1 M 19
- - --
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point. This can also be
a list or tuple of format strings, one per column.
`None` values are replaced with a `missingval` string (like
`floatfmt`, this can also be a list of values for different
columns):
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', 'latex_raw' and 'latex_booktabs'. Variable `tabulate_formats`
contains the list of currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
│ strings │ numbers │
    ╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
    ╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
"presto" is like tables produce by the Presto CLI:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "presto"))
strings | numbers
-----------+-----------
spam | 41.9999
eggs | 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<thead>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
</thead>
<tbody>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</tbody>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_raw" is similar to "latex", but doesn't escape special characters,
    such as backslash and underscore, so LaTeX commands may be embedded into
cells' values:
>>> print(tabulate([["spam$_9$", 41.9999], ["\\\\emph{eggs}", "451.0"]], tablefmt="latex_raw"))
\\begin{tabular}{lr}
\\hline
spam$_9$ & 41.9999 \\\\
\\emph{eggs} & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
    \\end{tabular}
Number parsing
--------------
By default, anything which can be parsed as a number is a number.
This ensures numbers represented as strings are aligned properly.
This can lead to weird results for particular strings such as
specific git SHAs e.g. "42992e1" will be parsed into the number
429920 and aligned as such.
To completely disable number parsing (and alignment), use
    `disable_numparse=True`. For more fine-grained control, a list of column
    indices can be used to disable number parsing only on those columns,
    e.g. `disable_numparse=[0, 2]` would disable number parsing only on the
first and third columns.
""" # noqa
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(
tabular_data, headers, showindex=showindex)
# empty values in the first column of RST tables should be
# escaped (issue #82)
# "" should be escaped as "\\ " or ".."
if tablefmt == 'rst':
list_of_lists, headers = _rst_escape_first_column(list_of_lists,
headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\t'.join(['\t'.join(map(_text_type, headers))] +
['\t'.join(map(_text_type, row))
for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
if tablefmt in multiline_formats and _is_multiline(plain_text):
tablefmt = multiline_formats.get(tablefmt, tablefmt)
is_multiline = True
else:
is_multiline = False
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(izip_longest(*list_of_lists))
numparses = _expand_numparse(disable_numparse, len(cols))
coltypes = [_column_type(col, numparse=np) for col, np in
zip(cols, numparses)]
if isinstance(floatfmt, basestring): # old version
# just duplicate the string to use in each column
float_formats = len(cols) * [floatfmt]
else: # if floatfmt is list, tuple etc we have one per column
float_formats = list(floatfmt)
if len(float_formats) < len(cols):
float_formats.extend((len(cols)-len(float_formats)) *
[_DEFAULT_FLOATFMT])
if isinstance(missingval, basestring):
missing_vals = len(cols) * [missingval]
else:
missing_vals = list(missingval)
if len(missing_vals) < len(cols):
missing_vals.extend((len(cols)-len(missing_vals)) *
[_DEFAULT_MISSINGVAL])
cols = [[_format(v, ct, fl_fmt, miss_v, has_invisible) for v in c]
for c, ct, fl_fmt, miss_v in zip(cols, coltypes, float_formats,
missing_vals)]
# align columns
aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
if colalign is not None:
assert isinstance(colalign, Iterable)
for idx, align in enumerate(colalign):
aligns[idx] = align
minwidths = [width_fn(h) + MIN_PADDING
for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars,
is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, max(width_fn(cl)
for cl in c)) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), is_multiline,
width_fn)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [max(width_fn(cl) for cl in c) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns,
is_multiline)
def _expand_numparse(disable_numparse, column_count):
"""
Return a list of bools of length `column_count` which indicates whether
number parsing should be used on each column.
If `disable_numparse` is a list of indices, each of those indices are
False, and everything else is True.
If `disable_numparse` is a bool, then the returned list is all the same.
"""
if isinstance(disable_numparse, Iterable):
numparses = [True] * column_count
for index in disable_numparse:
numparses[index] = False
return numparses
else:
return [not disable_numparse] * column_count
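# For example: _expand_numparse(False, 3) returns [True, True, True], while
# _expand_numparse([1], 3) returns [True, False, True].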
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
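# For example: _build_simple_row(['a', 'b'], DataRow('|', '|', '|')) -> '|a|b|'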
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths,
colaligns, rowfmt, pad):
colwidths = [w - 2*pad for w in padded_widths]
cells_lines = [c.splitlines() for c in padded_multiline_cells]
nlines = max(map(len, cells_lines)) # number of lines in the row
# vertically pad cells where some lines are missing
cells_lines = [(cl + [' '*w]*(nlines - len(cl)))
for cl, w in zip(cells_lines, colwidths)]
lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
for ln in lines_cells:
padded_ln = _pad_row(ln, pad)
_append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
return lines
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _append_line(lines, colwidths, colaligns, linefmt):
lines.append(_build_line(colwidths, colaligns, linefmt))
return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
if is_multiline:
# do it later, in _append_multiline_row
pad_row = lambda row, _: row # noqa
append_row = partial(_append_multiline_row, pad=pad)
else:
pad_row = _pad_row
append_row = _append_basic_row
padded_headers = pad_row(headers, pad)
padded_rows = [pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.lineabove)
if padded_headers:
append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
if fmt.linebelowheader and "linebelowheader" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
_append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
# the last row without a line below
append_row(lines, padded_rows[-1], padded_widths, colaligns,
fmt.datarow)
else:
for row in padded_rows:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
if fmt.linebelow and "linebelow" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelow)
if headers or rows:
return "\n".join(lines)
else: # a completely empty table
return ""
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data.
See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-o FILE, --output FILE print table to FILE (default: stdout)
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-F FPFMT, --float FPFMT floating point number format (default: g)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_raw,
latex_booktabs, tsv
(default: simple)
""" # noqa
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   "h1o:s:F:A:f:",
                                   ["help", "header", "output=", "sep=",
                                    "float=", "align=", "format="])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
floatfmt = _DEFAULT_FLOATFMT
colalign = None
tablefmt = "simple"
sep = r"\s+"
outfile = "-"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-o", "--output"]:
outfile = value
elif opt in ["-F", "--float"]:
floatfmt = value
        elif opt in ["-A", "--align"]:  # matches the "A:"/"align=" getopt spec above
colalign = value.split()
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out,
colalign=colalign)
else:
with open(f) as fobj:
_pprint_file(fobj, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out,
colalign=colalign)
def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, file, colalign):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows if r.strip()]
print(tabulate(table, headers, tablefmt, floatfmt=floatfmt,
colalign=colalign), file=file)
if __name__ == "__main__":
_main()
| anlambert/tulip | doc/python/tabulate.py | Python | lgpl-3.0 | 60,926 |
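A short usage sketch (not part of tabulate itself) exercising the public API defined above; it assumes the module is importable as `tabulate`, and the sample rows are made up.

from tabulate import tabulate, simple_separated_format
rows = [["spam", 41.9999], ["eggs", 451.0]]
headers = ["item", "qty"]
# A built-in format selected by name.
print(tabulate(rows, headers, tablefmt="grid"))
# A custom separator-based format built with the helper documented above.
print(tabulate(rows, headers, tablefmt=simple_separated_format(";")))
# Per-column alignment and float formatting.
print(tabulate(rows, headers, tablefmt="simple",
               floatfmt=".2f", colalign=("right", "left")))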
{
    "name": "Delivery Sequence",
    "version": "12.0.1.0.0",
"author": "IT-Projects LLC, Ivan Yelizariev",
"license": "LGPL-3",
"category": "Custom",
"website": "https://yelizariev.github.io",
"depends": ["delivery"],
"data": ["views.xml"],
"installable": False,
}
| yelizariev/addons-yelizariev | delivery_sequence/__manifest__.py | Python | lgpl-3.0 | 295 |
#!/usr/bin/python3
#from __future__ import print_function
from setuptools import setup, Extension
import sys
import os
import psutil
# monkey-patch for parallel compilation
def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
# those lines are copied from distutils.ccompiler.CCompiler directly
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
# parallel code
N = psutil.cpu_count(logical=False) # number of parallel compilations
import multiprocessing.pool
def _single_compile(obj):
try: src, ext = build[obj]
except KeyError: return
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# convert to list, imap is evaluated on-demand
list(multiprocessing.pool.ThreadPool(N).imap(_single_compile,objects))
return objects
#import distutils.ccompiler
#distutils.ccompiler.CCompiler.compile=parallelCCompile
''' Note:
to build Boost.Python on Windows with mingw
bjam target-os=windows/python=3.4 toolset=gcc variant=debug,release link=static,shared threading=multi runtime-link=shared cxxflags="-include cmath "
also insert this on top of boost/python.hpp :
#include <cmath> //fix cmath:1096:11: error: '::hypot' has not been declared
'''
def getExtensions():
platform = sys.platform
extensionsList = []
sources = ['src/Genome.cpp',
'src/Innovation.cpp',
'src/NeuralNetwork.cpp',
'src/Parameters.cpp',
'src/PhenotypeBehavior.cpp',
'src/Population.cpp',
'src/Random.cpp',
'src/Species.cpp',
'src/Substrate.cpp',
'src/Utils.cpp']
extra = ['-march=native',
'-mtune=native',
'-g',
]
if platform == 'darwin':
extra += ['-stdlib=libc++',
'-std=c++11',]
else:
extra += ['-std=gnu++11']
is_windows = 'win' in platform and platform != 'darwin'
if is_windows:
extra.append('/EHsc')
else:
extra.append('-w')
prefix = os.getenv('PREFIX')
if prefix and len(prefix) > 0:
extra += ["-I{}/include".format(prefix)]
build_sys = os.getenv('MN_BUILD')
if build_sys is None:
if os.path.exists('_MultiNEAT.cpp'):
sources.insert(0, '_MultiNEAT.cpp')
extra.append('-O3')
extensionsList.extend([Extension('MultiNEAT._MultiNEAT',
sources,
extra_compile_args=extra)],
)
else:
print('Source file is missing and MN_BUILD environment variable is not set.\n'
'Specify either \'cython\' or \'boost\'. Example to build in Linux with Cython:\n'
'\t$ export MN_BUILD=cython')
            sys.exit(1)
elif build_sys == 'cython':
from Cython.Build import cythonize
sources.insert(0, '_MultiNEAT.pyx')
extra.append('-O3')
extensionsList.extend(cythonize([Extension('MultiNEAT._MultiNEAT',
sources,
extra_compile_args=extra)],
))
elif build_sys == 'boost':
is_python_2 = sys.version_info[0] < 3
sources.insert(0, 'src/PythonBindings.cpp')
if is_windows:
if is_python_2:
raise RuntimeError("Python prior to version 3 is not supported on Windows due to limits of VC++ compiler version")
libs = ['boost_system', 'boost_serialization']
if is_python_2:
libs += ['boost_python', "boost_numpy"]
else:
# with boost 1.67 you need boost_python3x and boost_numpy3x where x is python version 3.x
libs += ['boost_python36', "boost_numpy36"] # in Ubuntu 14 there is only 'boost_python-py34'
# for Windows with mingw
# libraries= ['libboost_python-mgw48-mt-1_58',
# 'libboost_serialization-mgw48-mt-1_58'],
# include_dirs = ['C:/MinGW/include', 'C:/Users/Peter/Desktop/boost_1_58_0'],
# library_dirs = ['C:/MinGW/lib', 'C:/Users/Peter/Desktop/boost_1_58_0/stage/lib'],
extra.extend(['-DUSE_BOOST_PYTHON', '-DUSE_BOOST_RANDOM', #'-O0',
#'-DVDEBUG',
])
exx = Extension('MultiNEAT._MultiNEAT',
sources,
libraries=libs,
extra_compile_args=extra)
print(dir(exx))
print(exx)
print(exx.extra_compile_args)
extensionsList.append(exx)
else:
raise AttributeError('Unknown tool: {}'.format(build_sys))
return extensionsList
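# Example (illustrative) build invocations; the exact commands depend on your
# toolchain and which binding backend you want:
#   MN_BUILD=cython python setup.py build_ext --inplace
#   MN_BUILD=boost python setup.py install
# With MN_BUILD unset, a pre-generated _MultiNEAT.cpp must already be present.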
setup(name='multineat',
version='0.5', # Update version in conda/meta.yaml as well
packages=['MultiNEAT'],
ext_modules=getExtensions())
| peter-ch/MultiNEAT | setup.py | Python | lgpl-3.0 | 5,190 |
"""A module for handling and accessing both the in-memory, and on-disk,
representation of a set of routes as a set of segments. Where each segment
specifies its start and end stop ids, and other data (see
topology_shapefile_data_model.py for more."""
import sys
import csv
import re
import operator
import itertools
import misc_utils
import topology_shapefile_data_model as tp_model
########
# Basic route name handling
def get_route_order_key_from_name(route_def):
rname = route_def.short_name
if rname:
# Courtesy http://stackoverflow.com/questions/4289331/python-extract-numbers-from-a-string
try:
order_key = int(re.findall(r'\d+', rname)[0])
except IndexError:
order_key = rname
else:
order_key = route_def.long_name
return order_key
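# Example (illustrative): a route_def whose short_name is 'R43' gets the
# integer sort key 43, while a short name with no digits falls back to the
# name string itself (and an empty short name falls back to the long name).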
def get_route_names_sorted(route_names):
# Get an ordered list of route names so we can write in name order,
keyfunc = None
if len(route_names[0]) <= 3:
# Dropping the 'R' for route, for short route names, and sort
# by integer version of remaining string
keyfunc = lambda s: int(s[1:])
else:
# Just sort by the full route name string.
keyfunc = lambda s: s
rnames_sorted = sorted(route_names, key=keyfunc)
return rnames_sorted
########
# Definition of Route_Def and Seg_Reference lightweight classes and basic
# manipulation of them.
class Route_Def:
def __init__(self, route_id, short_name, long_name, dir_names,
ordered_seg_ids, gtfs_origin_id = None):
self.id = route_id
self.gtfs_origin_id = gtfs_origin_id
self.short_name = short_name
self.long_name = long_name
self.dir_names = dir_names
self.ordered_seg_ids = ordered_seg_ids
class Seg_Reference:
"""A small lightweight class for using as an in-memory storage of
key segment topology information, and reference to actual segment
feature in a shapefile layer.
This is designed to save cost of reading actual
shapefile frequently, e.g. for algorithms that need to search and/or
add to segments list a lot."""
def __init__(self, seg_id, first_stop_id, second_stop_id,
route_dist_on_seg=None, routes=None):
self.seg_id = seg_id # Segment ID
self.first_id = first_stop_id
self.second_id = second_stop_id
self.route_dist_on_seg = route_dist_on_seg
if routes is None:
self.routes = []
else:
self.routes = routes
self.seg_ii = None # Index into segments layer shapefile -
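# Example (illustrative) construction of a Seg_Reference for a segment with
# ID 1 joining stop IDs 10 and 11; the distance and route ID are made up:
#   seg = Seg_Reference(1, 10, 11, route_dist_on_seg=500.0, routes=[43])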
class Route_Ext_Info:
"""Class for holding relevant info about extended routes."""
def __init__(self, ext_id, ext_name, ext_type,
exist_r_s_name, exist_r_l_name,
exist_r_connect_stop_gtfs_id, exist_r_first_stop_gtfs_id,
upd_r_short_name, upd_r_long_name, upd_dir_name):
self.ext_id = ext_id
self.ext_name = ext_name
self.ext_type = ext_type
self.exist_r_short_name = exist_r_s_name
self.exist_r_long_name = exist_r_l_name
self.exist_r_connect_stop_gtfs_id = exist_r_connect_stop_gtfs_id
self.exist_r_first_stop_gtfs_id = exist_r_first_stop_gtfs_id
self.upd_r_short_name = upd_r_short_name
self.upd_r_long_name = upd_r_long_name
self.upd_dir_name = upd_dir_name
assert ext_type in tp_model.ROUTE_EXT_ALL_TYPES
assert self.exist_r_connect_stop_gtfs_id is not None
if ext_type == tp_model.ROUTE_EXT_TYPE_NEW:
assert self.exist_r_first_stop_gtfs_id is not None
assert upd_dir_name
return
def get_print_name(route_def):
print_name = misc_utils.get_route_print_name(
route_def.short_name, route_def.long_name)
return print_name
def add_route_to_seg_ref(seg_ref, route_id):
if route_id not in seg_ref.routes:
seg_ref.routes.append(route_id)
return
def seg_has_stops(seg_ref, stop_id_1, stop_id_2):
if seg_ref.first_id == stop_id_1 and \
seg_ref.second_id == stop_id_2 \
or seg_ref.first_id == stop_id_2 and \
seg_ref.second_id == stop_id_1:
return True
return False
def get_seg_dist_km(seg_ref):
    # Guard on the route distance actually having been set, since the warning
    # below refers to a missing distance (not a missing seg_ref).
    if seg_ref.route_dist_on_seg is not None:
        return seg_ref.route_dist_on_seg / tp_model.ROUTE_DIST_RATIO_TO_KM
else:
print "Warning:- asked for distance of a seg_ref with ID %d, but "\
"route distance hasn't yet been read or calculated for this "\
"seg_ref." % seg_ref.seg_id
return None
def get_other_stop_id(seg_ref, stop_id):
if stop_id == seg_ref.first_id:
return seg_ref.second_id
else:
assert stop_id == seg_ref.second_id
return seg_ref.first_id
#####################
# Basic manipulations on a list of seg_refs or route_defs
def get_seg_ref_with_id(seg_id, seg_refs):
for seg_ref in seg_refs:
if seg_id == seg_ref.seg_id:
return seg_ref
return None
def build_seg_refs_lookup_table(seg_refs):
seg_refs_lookup_table = {}
for seg_ref in seg_refs:
seg_refs_lookup_table[seg_ref.seg_id] = seg_ref
return seg_refs_lookup_table
def find_seg_ref_matching_stops(all_seg_refs, stop_id_1, stop_id_2):
matched_seg_ref = None
for seg_ref in all_seg_refs:
if seg_has_stops(seg_ref, stop_id_1, stop_id_2):
matched_seg_ref = seg_ref
break
return matched_seg_ref
def add_update_seg_ref(start_stop_id, end_stop_id, route_id,
route_dist_on_seg, all_seg_refs, seg_refs_this_route,
possible_route_duplicates=False):
"""Add a new segment to the two pre-existing lists all_seg_refs, and
seg_refs_this_route. If segment already exists, update its route list."""
seg_id = None
new_status = False
seg_ref_to_return = None
matched_seg_ref = find_seg_ref_matching_stops(all_seg_refs, start_stop_id,
end_stop_id)
if matched_seg_ref:
new_status = False
#print "While adding, matched a segment! Seg id = %s, existing "\
# "routes = %s, new route = '%s'" %\
# (matched_seg_ref.seg_id\
# matched_seg_ref.routes,\
# route_id)
add_route_to_seg_ref(matched_seg_ref, route_id)
seg_ref_to_return = matched_seg_ref
if possible_route_duplicates:
# Adding a new defensive case:- don't want to add a segment twice to
# the same route.
matched_in_route = find_seg_ref_matching_stops(seg_refs_this_route,
start_stop_id, end_stop_id)
            if not matched_in_route:
seg_refs_this_route.append(seg_ref_to_return)
else:
seg_refs_this_route.append(seg_ref_to_return)
else:
new_status = True
# +1 since we want to start counter at 1
seg_id = len(all_seg_refs)+1
new_seg_ref = Seg_Reference(seg_id, start_stop_id, end_stop_id,
route_dist_on_seg, routes = [route_id])
# Its a new segment, so append to the list of all segments.
all_seg_refs.append(new_seg_ref)
seg_ref_to_return = new_seg_ref
seg_refs_this_route.append(seg_ref_to_return)
return seg_ref_to_return, new_status
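# Example (illustrative) of building up the shared and per-route segment lists
# while walking a route's consecutive stop pairs; stop/route IDs and distances
# here are made up:
#   all_segs, route_segs = [], []
#   seg, is_new = add_update_seg_ref(10, 11, 43, 500.0, all_segs, route_segs)
#   seg, is_new = add_update_seg_ref(11, 12, 43, 750.0, all_segs, route_segs)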
def route_defs_match_statuses(route_def, route_def2):
match_statuses = []
if route_def.id is not None and route_def2.id is not None:
test = route_def.id == route_def2.id
match_statuses.append(test)
if route_def.short_name and route_def2.short_name:
test = route_def.short_name == route_def2.short_name
match_statuses.append(test)
if route_def.long_name and route_def2.long_name:
test = route_def.long_name == route_def2.long_name
match_statuses.append(test)
match_status = False
# Make sure there is at least one attribute matching, and all match.
if len(match_statuses) >= 1 and False not in match_statuses:
match_status = True
return match_status
def get_matching_route_defs(route_defs, search_route_def):
matching_route_defs = []
for rdef in route_defs:
if route_defs_match_statuses(rdef, search_route_def):
matching_route_defs.append(rdef)
return matching_route_defs
def route_def_matches_gtfs_route(route_def, gtfs_route):
match_statuses = []
if route_def.id is not None:
test = route_def.id == gtfs_route.route_id
match_statuses.append(test)
if route_def.short_name:
test = route_def.short_name == gtfs_route.route_short_name
match_statuses.append(test)
if route_def.long_name:
test = route_def.long_name == gtfs_route.route_long_name
match_statuses.append(test)
match_status = False
# Make sure there is at least one attribute matching, and all match.
if len(match_statuses) >= 1 and False not in match_statuses:
match_status = True
return match_status
def get_gtfs_route_ids_matching_route_defs(route_defs_to_match, gtfs_routes):
route_defs_to_check_match = zip(route_defs_to_match,
itertools.count(0))
matching_gtfs_ids = []
route_defs_match_status = [False] * len(route_defs_to_match)
all_matched = False
for gtfs_route in gtfs_routes:
matches = False
# Note we take a copy of list here since we want to remove from it.
for route_def, r_index in route_defs_to_check_match[:]:
if route_def_matches_gtfs_route(route_def, gtfs_route):
route_defs_match_status[r_index] = True
gtfs_route_id = gtfs_route.route_id
if gtfs_route_id not in matching_gtfs_ids:
matching_gtfs_ids.append(gtfs_route_id)
else:
print "Warning: route def just matched, with ID "\
"%s, name %s, already matched a GTFS route. "\
"Ignoring 2nd match." \
% (gtfs_route_id, get_print_name(route_def))
if route_def.id == gtfs_route_id:
# Only remove the route_def in this case, since we matched
# on ID. Otherwise there may be more matches.
route_defs_to_check_match.remove((route_def,r_index))
if len(route_defs_to_check_match) == 0:
all_matched = True
break
if all_matched:
# All routes matched, we're done.
break
for r_index, match_status in enumerate(route_defs_match_status):
if not match_status:
unmatched_r_def = route_defs_to_match[r_index]
print "Warning: route given by ID %s, name %s, didn't match "\
"any GTFS routes in given selection." \
% (unmatched_r_def.id, get_print_name(unmatched_r_def))
return matching_gtfs_ids, route_defs_match_status
def create_route_defs_list_from_route_segs(segs_by_route,
route_dirs, mode_config, r_ids_output_order=None):
"""Turn a dict containing ordered lists of seg references that make up
each route (segs_by_route) and related dictionary of route dir names
(route_dirs) into a list of route definitions. If r_ids_output_order
provided, routes defs in list will be ordered in that order."""
route_defs = []
if r_ids_output_order is None:
r_ids_output_order = segs_by_route.keys()
for r_id in r_ids_output_order:
# Haven't yet implemented ability to create route long names
r_short_name = tp_model.route_name_from_id(r_id, mode_config)
r_long_name = None
rdef = Route_Def(r_id, r_short_name, r_long_name, route_dirs[r_id],
map(operator.attrgetter('seg_id'), segs_by_route[r_id]))
route_defs.append(rdef)
return route_defs
#########
### Functions to do with querying network topology
def find_linking_stop_id(seg1, seg2):
"""Checks if two segments are linked by a common stop. If true, returns
the ID of the linking stop. If they don't link, returns None."""
if seg1.first_id == seg2.first_id or seg1.first_id == seg2.second_id:
return seg1.first_id
elif seg1.second_id == seg2.first_id or seg1.second_id == seg2.second_id:
return seg1.second_id
return None
def find_non_linking_stop_id(seg1, seg2):
"""Find the stop in seg1 that doesn't link to seg2."""
if seg1.first_id == seg2.first_id or seg1.first_id == seg2.second_id:
return seg1.second_id
elif seg1.second_id == seg2.first_id or seg1.second_id == seg2.second_id:
return seg1.first_id
return None
def get_stop_order(seg_ref, next_seg_ref):
"""Use the fact that for two segments, in the first segment, there must be
a matching stop with the 2nd segment. Return the IDs of the 1st and 2nd
stops in the first segment."""
linking_stop_id = find_linking_stop_id(seg_ref, next_seg_ref)
if linking_stop_id is None:
print "Error, in segment with id %d, next seg id is %d, "\
"stop a is #%d, stop b is #%d, "\
"next seg stop a is #%d, stop b is #%d, "\
"couldn't work out stop order."\
% (seg_ref.seg_id, next_seg_ref.seg_id, \
seg_ref.first_id, seg_ref.second_id, \
next_seg_ref.first_id, next_seg_ref.second_id)
sys.exit(1)
else:
first_stop_id = get_other_stop_id(seg_ref, linking_stop_id)
second_stop_id = linking_stop_id
return first_stop_id, second_stop_id
def get_stop_ids_in_travel_dir(route_seg_refs, seg_ii, dir_index):
"""Returns the stop ids of segment ii in route_route_seg_refs given
order of travel by dir_index. (Assumes route_seg_refs ordered in
direction of travel of dir_index 0.)"""
    assert seg_ii >= 0 and seg_ii <= len(route_seg_refs) - 1
    seg_ref = route_seg_refs[seg_ii]
if dir_index == 0:
if seg_ii < len(route_seg_refs) - 1:
stop_ids = get_stop_order(seg_ref,
route_seg_refs[seg_ii+1])
else:
# Special case for last seg - need to use prev seg.
linking_id = find_linking_stop_id(seg_ref,
route_seg_refs[seg_ii-1])
other_id = get_other_stop_id(seg_ref, linking_id)
stop_ids = (linking_id, other_id)
else:
if seg_ii > 0:
stop_ids = get_stop_order(seg_ref,
route_seg_refs[seg_ii-1])
else:
# Special case for first seg - need to use next seg.
linking_id = find_linking_stop_id(seg_ref,
route_seg_refs[seg_ii+1])
other_id = get_other_stop_id(seg_ref, linking_id)
# Remember we're going 'backwards' in this case
stop_ids = (linking_id, other_id)
return stop_ids
def build_seg_links(route_seg_refs):
"""Create a dictionary, which for each segment ID, gives the list
of other segments linked to that id via a common stop."""
seg_links = {}
for seg in route_seg_refs:
seg_links[seg.seg_id] = []
for ii, seg in enumerate(route_seg_refs[:-1]):
for other_seg in route_seg_refs[ii+1:]:
if find_linking_stop_id(seg, other_seg) is not None:
seg_links[seg.seg_id].append(other_seg.seg_id)
seg_links[other_seg.seg_id].append(seg.seg_id)
return seg_links
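# Example (illustrative): for three segments with IDs 1, 2, 3 covering stop
# pairs (A,B), (B,C), (C,D), build_seg_links returns {1: [2], 2: [1, 3], 3: [2]}.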
def order_segs_based_on_links(route_seg_refs, seg_links):
"""Construct and ordered list of all segments within a route
(given in list route_seg_refs), based on their links via common stops."""
# Ok: start with one of the segments that only has one link
start_seg_id = None
for seg_id, links in seg_links.iteritems():
if len(links) == 1:
start_seg_id = seg_id
break
if start_seg_id is None:
print "Error: no segment with 1 link."
sys.exit(1)
ordered_seg_refs = [get_seg_ref_with_id(start_seg_id, route_seg_refs)]
prev_seg_id = start_seg_id
curr_seg_id = seg_links[start_seg_id][0]
while True:
curr_seg_ref = get_seg_ref_with_id(curr_seg_id, route_seg_refs)
ordered_seg_refs.append(curr_seg_ref)
links = seg_links[curr_seg_id]
if len(links) > 2:
print "Error, segment %d is linked to %d other segments %s" %\
                (curr_seg_id, len(links), links)
sys.exit(1)
if len(links) == 1:
# We have reached the final segment in the route.
break
next_seg_id = None
for link_seg_id in links:
if link_seg_id != prev_seg_id:
next_seg_id = link_seg_id
assert next_seg_id is not None
prev_seg_id = curr_seg_id
curr_seg_id = next_seg_id
if len(route_seg_refs) != len(ordered_seg_refs):
print "Error: total # segments for this route is %d, but only "\
"found a linked chain of %d segments." \
% (len(route_seg_refs), len(ordered_seg_refs))
unlinked_seg_ids = []
for seg in route_seg_refs:
            if get_seg_ref_with_id(seg.seg_id, ordered_seg_refs) is None:
unlinked_seg_ids.append(seg.seg_id)
print "Unlinked segment IDs: %s" % unlinked_seg_ids
sys.exit(1)
return ordered_seg_refs
def get_set_of_stops_in_route_so_far(segs_so_far):
stop_ids_in_route_so_far = map(operator.attrgetter('first_id'),
segs_so_far)
stop_ids_in_route_so_far += map(operator.attrgetter('second_id'),
segs_so_far)
stop_ids_in_route_so_far = set(stop_ids_in_route_so_far)
return stop_ids_in_route_so_far
def get_seg_id_with_shortest_dist(link_seg_ids, seg_refs,
link_dest_stop_ids_disallowed):
# Trying algorithm of choosing segment with shortest distance.
min_direct_dist = float("inf")
min_dist_seg_id = None
for link_seg_id in link_seg_ids:
link_seg = get_seg_ref_with_id(link_seg_id, seg_refs)
if link_seg.first_id in link_dest_stop_ids_disallowed \
or link_seg.second_id in link_dest_stop_ids_disallowed:
continue
if link_seg.route_dist_on_seg < min_direct_dist:
min_direct_dist = link_seg.route_dist_on_seg
min_dist_seg_id = link_seg_id
return min_dist_seg_id
def get_links_sorted_by_distance(link_seg_ids, seg_refs,
link_dest_stop_ids_disallowed):
links_and_dists = []
for link_seg_id in link_seg_ids:
link_seg = get_seg_ref_with_id(link_seg_id, seg_refs)
if link_seg.first_id in link_dest_stop_ids_disallowed \
or link_seg.second_id in link_dest_stop_ids_disallowed:
continue
links_and_dists.append((link_seg_id, link_seg.route_dist_on_seg))
if links_and_dists:
links_and_dists.sort(key=operator.itemgetter(1))
link_seg_ids_sorted_by_dist = map(operator.itemgetter(0),
links_and_dists)
else:
link_seg_ids_sorted_by_dist = None
return link_seg_ids_sorted_by_dist
def get_seg_id_with_stop_ids(seg_refs, stop_id_a, stop_id_b):
seg_ids_that_include_stop_ids = []
for seg in seg_refs:
if stop_id_a in (seg.first_id, seg.second_id) \
and stop_id_b in (seg.first_id, seg.second_id):
seg_ids_that_include_stop_ids.append(seg.seg_id)
assert len(seg_ids_that_include_stop_ids) <= 1
if not seg_ids_that_include_stop_ids:
return None
else:
return seg_ids_that_include_stop_ids[0]
def get_seg_ids_that_include_stop_id(seg_refs, stop_id):
seg_ids_that_include_stop_id = []
for seg_ref in seg_refs:
if stop_id in (seg_ref.first_id, seg_ref.second_id):
seg_ids_that_include_stop_id.append(seg_ref.seg_id)
return seg_ids_that_include_stop_id
def get_seg_ids_with_minimum_links(seg_ids, seg_links):
min_link_segs = []
min_links = min([len(seg_links[seg_id]) for seg_id in seg_ids])
for seg_id in seg_ids:
if len(seg_links[seg_id]) == min_links:
min_link_segs.append(seg_id)
return min_link_segs, min_links
def get_seg_refs_for_ordered_stop_ids(stop_ids, seg_refs):
ordered_segs = []
for stop_id_a, stop_id_b in misc_utils.pairs(stop_ids):
seg_id = get_seg_id_with_stop_ids(seg_refs,
stop_id_a, stop_id_b)
if seg_id is None:
print "WARNING:- the pattern being processed contains no "\
"segments with stop pair IDs %d, %d, in list of "\
"ordered stop ids you requested."\
% (stop_id_a, stop_id_b)
ordered_segs = []
break
else:
seg_ref = get_seg_ref_with_id(seg_id, seg_refs)
ordered_segs.append(seg_ref)
return ordered_segs
def get_full_stop_pattern_segs(all_pattern_segs, seg_links,
force_first_stop_ids=None):
"""More advanced function to build a list of segments into a route :-
this time by finding a 'full-stop' pattern linking all the segments.
(This is useful if you're trying to reconstruct a single full-stop pattern
from a set of all segments derived from a GTFS file with varying stop
patterns throughout the day.)
(Note: current implementation is unlikely to deal with branching routes
well. It will follow the branch with the most segments, won't include
other branches.)
Note re alg tuning and force_first_stop_ids argument:- after a fair bit
of effort I was able to make the algorithm produce sensible results for
the 'full stop' version of routes with expresses and a 'city loop' trains
in most cases. However a few cases such as the Belgrave line in Melbourne
are difficult to come up with a good outcome with no initial information.
Therefore there is a force_first_stop_ids argument that allows to force
beginning the segment-chain building algorithm at a particular stop(s), to
help get a good result.
"""
full_stop_pattern_segs = []
all_seg_ids = map(operator.attrgetter('seg_id'), all_pattern_segs)
if len(all_pattern_segs) == 1:
full_stop_pattern_segs = list(all_pattern_segs)
return full_stop_pattern_segs
if force_first_stop_ids and len(force_first_stop_ids) >= 3:
# In this case :- we have at least two segments to start from in a
# given order. Build these then add the longest chain at end.
# We know there's no need to extend/reverse from here.
print "Starting building chain with segs between stops %s ...." \
% (force_first_stop_ids)
full_stop_pattern_segs = get_seg_refs_for_ordered_stop_ids(
force_first_stop_ids, all_pattern_segs)
if not full_stop_pattern_segs: return []
first_link_seg_id = full_stop_pattern_segs.pop().seg_id
print "Added seg IDs b/w these stops: %s - next is %d" \
% (map(operator.attrgetter('seg_id'), full_stop_pattern_segs),\
first_link_seg_id)
seg_chain, chain_len = get_longest_seg_linked_chain(first_link_seg_id,
all_pattern_segs, full_stop_pattern_segs, seg_links, {})
full_stop_pattern_segs += seg_chain
return full_stop_pattern_segs
elif force_first_stop_ids and len(force_first_stop_ids) == 2:
# We've been given req'd first two stops, hence req'd first
# segment. So search all options with this segment in order.
print "Starting building chain with seg between stops %s ...." \
% (force_first_stop_ids)
full_stop_pattern_segs = get_seg_refs_for_ordered_stop_ids(
force_first_stop_ids, all_pattern_segs)
if not full_stop_pattern_segs: return []
first_seg_id = full_stop_pattern_segs[0].seg_id
print "First build seg is #%d" % first_seg_id
link_seg_ids = seg_links[first_seg_id]
link_segs = [get_seg_ref_with_id(seg_id, all_pattern_segs) for \
seg_id in link_seg_ids]
cand_init_link_seg_ids = get_seg_ids_that_include_stop_id(
link_segs, force_first_stop_ids[-1])
# Now we need to find the longest sub-chain for all of these
# init link candidates.
longest_chain = []
for init_link_seg_id in cand_init_link_seg_ids:
seg_chain, chain_len = get_longest_seg_linked_chain(
init_link_seg_id, all_pattern_segs, full_stop_pattern_segs,
seg_links, {})
if chain_len > len(longest_chain):
longest_chain = seg_chain
full_stop_pattern_segs += longest_chain
elif force_first_stop_ids and len(force_first_stop_ids) == 1:
# We have a first stop ID - but don't necessarily know which segment
# this stop belongs to to start at. Need to potentially try
# all combos passing through this stop.
first_stop_id = force_first_stop_ids[0]
print "Forcing start of building chain at stop ID %d" \
% first_stop_id
cand_start_seg_ids = get_seg_ids_that_include_stop_id(
all_pattern_segs, first_stop_id)
start_seg_ids_and_chains = []
for start_seg_id in cand_start_seg_ids:
start_seg_ref = get_seg_ref_with_id(start_seg_id, all_pattern_segs)
other_stop_id = get_other_stop_id(start_seg_ref, first_stop_id)
link_seg_ids = seg_links[start_seg_id]
link_segs = [get_seg_ref_with_id(seg_id, all_pattern_segs) for \
seg_id in link_seg_ids]
# We only want 'forward' links away from the first stop id
# work out longest of these.
cand_init_link_seg_ids = get_seg_ids_that_include_stop_id(
link_segs, other_stop_id)
longest_sub_chain = []
for link_seg_id in cand_init_link_seg_ids:
seg_chain, chain_len = get_longest_seg_linked_chain(
link_seg_id, all_pattern_segs, [start_seg_ref],
seg_links, {})
if chain_len > len(longest_sub_chain):
longest_sub_chain = seg_chain
start_seg_ids_and_chains.append([start_seg_ref] + longest_sub_chain)
        # We need to get the longest chain, so sort by length in descending
        # order before taking the first entry.
        start_seg_ids_and_chains.sort(key=len, reverse=True)
        full_stop_pattern_segs = start_seg_ids_and_chains[0]
else:
# We don't have a forced seg to start at.
# Ok: best bet in this case is search for one of the segments that
# only has one link - and is therefore an end of the route.
possible_reverse_links = False
start_seg_id = None
for seg_id, link_seg_ids in seg_links.iteritems():
if len(link_seg_ids) == 1:
start_seg_id = seg_id
break
if start_seg_id is not None:
print "No start stop specified, so starting with seg #%d "\
"that has only one link." % start_seg_id
else:
print "No start stop specified, and route has no "\
"segments with only one link."
possible_reverse_links = True
# Fallback case.
cand_start_seg_ids, min_links = get_seg_ids_with_minimum_links(
all_seg_ids, seg_links)
print "Minimum links of any seg is %d" % min_links
# Try the 'starts' and 'ends' first in order we read segs for this
# route.
min_dist_from_end = float("inf")
for seg_id in cand_start_seg_ids:
dist_from_end = min(seg_id - 1, len(all_pattern_segs) - seg_id)
if dist_from_end < min_dist_from_end:
min_dist_from_end = dist_from_end
start_seg_id = seg_id
if dist_from_end == 0:
break
print "Starting with seg to have this # of links closest to "\
"start or end = seg #%s" % start_seg_id
# Ok:- we've chosen a start seg ID, now need to choose best link seg
#print "Added start seg %d." % start_seg_id
start_seg_ref = get_seg_ref_with_id(start_seg_id, all_pattern_segs)
full_stop_pattern_segs.append(start_seg_ref)
init_link_seg_ids = seg_links[start_seg_id]
first_link_seg_id = get_seg_id_with_shortest_dist(init_link_seg_ids,
all_pattern_segs, [])
seg_chain, chain_len = get_longest_seg_linked_chain(first_link_seg_id,
all_pattern_segs, full_stop_pattern_segs, seg_links, {})
full_stop_pattern_segs += seg_chain
if possible_reverse_links:
# We want to try building other possible 'reverse' chains, given
# with this flag we may have started in the middle of a route.
rem_init_link_seg_ids = list(init_link_seg_ids)
rem_init_link_seg_ids.remove(first_link_seg_id)
first_stop_id = find_non_linking_stop_id(full_stop_pattern_segs[0],
full_stop_pattern_segs[1])
stop_ids_in_route_so_far = get_set_of_stops_in_route_so_far(
full_stop_pattern_segs)
rev_candidate_link_ids = []
for link_seg_id in rem_init_link_seg_ids:
link_seg_ref = get_seg_ref_with_id(link_seg_id, all_pattern_segs)
if first_stop_id not in \
(link_seg_ref.first_id, link_seg_ref.second_id):
# This must be a 'branch' from the first stop, not a
# possible reverse.
continue
non_link_stop = get_other_stop_id(link_seg_ref, first_stop_id)
# NOTE:- rules out some loops
if non_link_stop not in stop_ids_in_route_so_far:
# we have an unexplored section, not an express into
# already included chain.
rev_candidate_link_ids.append(link_seg_id)
if rev_candidate_link_ids:
print "Calling special reverse case ..."
full_stop_pattern_segs.reverse()
longest_chains_lookup_cache = {}
longest_sub_chain = []
longest_sub_chain_len = 0
for rev_link_seg_id in rev_candidate_link_ids:
seg_sub_chain, sub_chain_len = get_longest_seg_linked_chain(
rev_link_seg_id, all_pattern_segs,
full_stop_pattern_segs, seg_links,
#longest_chains_lookup_cache)
{})
if sub_chain_len > longest_sub_chain_len:
longest_sub_chain = seg_sub_chain
longest_sub_chain_len = sub_chain_len
full_stop_pattern_segs += longest_sub_chain
return full_stop_pattern_segs
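# Example (illustrative) of reconstructing the full-stop chain for one route,
# forcing the chain to start at a known terminus stop (stop ID 1 is made up):
#   seg_links = build_seg_links(route_seg_refs)
#   full_chain = get_full_stop_pattern_segs(route_seg_refs, seg_links,
#       force_first_stop_ids=[1])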
def get_longest_seg_linked_chain(init_seg_id, all_segs, segs_visited_so_far,
seg_links, longest_chains_lookup_cache):
# Special case for having visited all segments - esp for 1-segment routes
if len(all_segs) == len(segs_visited_so_far):
return [], 0
seg_chain = []
init_seg_ref = get_seg_ref_with_id(init_seg_id, all_segs)
prev_seg_ref = segs_visited_so_far[-1]
prev_seg_id = prev_seg_ref.seg_id
prev_stop_id = find_linking_stop_id(prev_seg_ref, init_seg_ref)
stop_ids_in_route_so_far = get_set_of_stops_in_route_so_far(
segs_visited_so_far)
curr_seg_id = init_seg_id
while True:
curr_seg_ref = get_seg_ref_with_id(curr_seg_id, all_segs)
assert curr_seg_id not in map(operator.attrgetter('seg_id'), seg_chain)
seg_chain.append(curr_seg_ref)
#print "Appended seg %d to sub chain. - sub chain is now %s." % \
# (curr_seg_id, map(operator.attrgetter('seg_id'), seg_chain))
curr_stop_id = find_non_linking_stop_id(curr_seg_ref, prev_seg_ref)
stop_ids_in_route_so_far.add(curr_stop_id)
link_seg_ids = seg_links[curr_seg_id]
next_seg_id = None
if len(link_seg_ids) == 1:
# We have reached the final segment in the route.
break
elif len(link_seg_ids) == 2:
for link_seg_id in link_seg_ids:
if link_seg_id != prev_seg_id:
next_seg_id = link_seg_id
assert next_seg_id is not None
next_seg_ref = get_seg_ref_with_id(next_seg_id, all_segs)
linking_stop_id = find_linking_stop_id(next_seg_ref, curr_seg_ref)
# Need this check to deal with single-segment branch cases.
if linking_stop_id == prev_stop_id:
#print "Warning:- single 'forward' link found from seg %d "\
# "to seg %d, but this next seg is actually a branch "\
# "from previous link. So breaking here."\
# % (curr_seg_id, next_seg_id)
break
# We need this extra check to avoid loops back into existing
# stops.
next_stop_id = get_other_stop_id(next_seg_ref, linking_stop_id)
if next_stop_id in stop_ids_in_route_so_far:
#print "Warning:- single forward link found from seg %d "\
# "to seg %d, but this next seg links back to an "\
# "already visited stop. So breaking here."\
# % (curr_seg_id, next_seg_id)
break
else:
# This means there is either a 'branch', 'express' section,
# or a loop.
fwd_link_seg_ids = list(link_seg_ids)
fwd_link_seg_ids.remove(prev_seg_id)
stops_disallowed = set(stop_ids_in_route_so_far)
stops_disallowed.remove(curr_stop_id)
fwd_link_seg_ids = get_links_sorted_by_distance(fwd_link_seg_ids,
all_segs, stops_disallowed)
if fwd_link_seg_ids is None:
#print "Warning: multiple links from current segment, but "\
# "all of them looped back to an already used stop. "\
# "So breaking here (last added seg ID was %d)."\
# % curr_seg_id
break
longest_sub_chain = []
longest_sub_chain_len = 0
#print "*In recursive part*, curr_seg_id=%d" % curr_seg_id
updated_segs_visited_so_far = segs_visited_so_far + seg_chain
# We only want to cache lookup chains at the same depth level
sub_longest_chains_lookup_cache = {}
for link_seg_id in fwd_link_seg_ids:
try:
sub_seg_chain = longest_chains_lookup_cache[link_seg_id]
sub_chain_len = len(sub_seg_chain)
#print "(lookup answer from cache for link %d was %d)" \
# % (link_seg_id, sub_chain_len)
except KeyError:
# Recursive call, to try all possible branches.
#print "*Launching recursive call on link seg id %d" \
# % link_seg_id
sub_seg_chain, sub_chain_len = get_longest_seg_linked_chain(
link_seg_id, all_segs,
updated_segs_visited_so_far, seg_links,
#sub_longest_chains_lookup_cache)
{})
#print "...Sub-chain from link %d was %d long" \
# % (link_seg_id, sub_chain_len)
if sub_chain_len > longest_sub_chain_len:
longest_sub_chain = sub_seg_chain
longest_sub_chain_len = sub_chain_len
assert len(set(longest_sub_chain)) == len(longest_sub_chain)
seg_chain += longest_sub_chain
assert len(set(seg_chain)) == len(seg_chain)
break
# Defensive check
if next_seg_id in map(operator.attrgetter('seg_id'),
segs_visited_so_far + seg_chain):
#print "Warning, we found a loop in segments while constructing "\
# "full-stop pattern - breaking with loop seg id being %d."\
# % next_seg_id
break
prev_seg_id = curr_seg_id
prev_stop_id = curr_stop_id
prev_seg_ref = curr_seg_ref
curr_seg_id = next_seg_id
longest_chains_lookup_cache[init_seg_id] = seg_chain
#print "sub-chain of ids calc was %s" \
# % (map(operator.attrgetter('seg_id'), seg_chain))
assert len(set(seg_chain)) == len(seg_chain)
all_segs_thus_far = segs_visited_so_far + seg_chain
assert len(set(all_segs_thus_far)) == \
len(all_segs_thus_far)
stop_ids_in_route_thus_far = get_set_of_stops_in_route_so_far(
all_segs_thus_far)
assert len(set(stop_ids_in_route_thus_far)) == \
len(stop_ids_in_route_thus_far)
return seg_chain, len(seg_chain)
def order_all_route_segments(all_segs_by_route, r_ids_sorted=None):
# Now order each route properly ...
# for each route - find unique stop names
    if r_ids_sorted is None:
r_ids_sorted = sorted(all_segs_by_route.keys())
segs_by_routes_ordered = {}
for r_id in r_ids_sorted:
print "Ordering segments by traversal for route ID %d:" \
% (r_id)
route_seg_refs = all_segs_by_route[r_id]
if len(route_seg_refs) == 1:
segs_by_routes_ordered[r_id] = route_seg_refs
else:
seg_links = build_seg_links(route_seg_refs)
ordered_seg_refs = order_segs_based_on_links(route_seg_refs,
seg_links)
segs_by_routes_ordered[r_id] = ordered_seg_refs
assert len(segs_by_routes_ordered) == len(all_segs_by_route)
return segs_by_routes_ordered
def create_basic_route_dir_names(all_segs_by_route, mode_config):
"""Creating basic direction names for routes :- based on first and last
stop ids and names in each route."""
route_dir_names = {}
for r_id, route_seg_refs in all_segs_by_route.iteritems():
if len(route_seg_refs) == 1:
start_stop = route_seg_refs[0].first_id
end_stop = route_seg_refs[0].second_id
else:
first_seg, second_seg = route_seg_refs[0], route_seg_refs[1]
start_stop = find_non_linking_stop_id(first_seg, second_seg)
if start_stop is None:
print "Error in working out directions for route ID %d:- "\
"first and second segments don't link via a common stop!"\
% r_id
sys.exit(1)
last_seg = route_seg_refs[-1]
second_last_seg = route_seg_refs[-2]
end_stop = find_non_linking_stop_id(last_seg, second_last_seg)
if end_stop is None:
print "Error in working out directions for route ID %d:- "\
"last and second last segments don't link via a "\
"common stop!"\
% r_id
sys.exit(1)
first_stop_name = tp_model.stop_default_name_from_id(start_stop,
mode_config)
last_stop_name = tp_model.stop_default_name_from_id(end_stop,
mode_config)
dir1 = "%s->%s" % (first_stop_name, last_stop_name)
dir2 = "%s->%s" % (last_stop_name, first_stop_name)
route_dir_names[r_id] = (dir1, dir2)
assert len(all_segs_by_route) == len(route_dir_names)
return route_dir_names
def extract_stop_list_along_route(ordered_seg_refs):
stop_ids = []
if len(ordered_seg_refs) == 1:
# special case for a route with only one segment.
seg_ref = ordered_seg_refs[0]
stop_ids = [seg_ref.first_id, seg_ref.second_id]
else:
first_stop_id, second_stop_id = get_stop_order(
ordered_seg_refs[0], ordered_seg_refs[1])
stop_ids.append(first_stop_id)
prev_second_stop_id = second_stop_id
for seg_ref in ordered_seg_refs[1:]:
first_stop_id = prev_second_stop_id
second_stop_id = get_other_stop_id(seg_ref, first_stop_id)
stop_ids.append(first_stop_id)
prev_second_stop_id = second_stop_id
# Finally, add second stop of final segment.
stop_ids.append(second_stop_id)
return stop_ids
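# Example (illustrative): for an ordered chain of segments covering stop pairs
# (1,2), (2,3), (3,4), extract_stop_list_along_route returns [1, 2, 3, 4].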
########################################
# I/O from segments and stops shapefiles
def seg_ref_from_feature(seg_feature):
seg_id = int(seg_feature.GetField(tp_model.SEG_ID_FIELD))
stop_id_a, stop_id_b = tp_model.get_stop_ids_of_seg(seg_feature)
route_dist_on_seg = float(seg_feature.GetField(
tp_model.SEG_ROUTE_DIST_FIELD))
seg_rlist = tp_model.get_routes_on_seg(seg_feature)
seg_ref = Seg_Reference(seg_id, stop_id_a, stop_id_b,
route_dist_on_seg=route_dist_on_seg, routes=seg_rlist)
return seg_ref
def route_ext_from_feature(route_ext_feat):
ext_id = route_ext_feat.GetField(tp_model.ROUTE_EXT_ID_FIELD)
ext_name = route_ext_feat.GetField(tp_model.ROUTE_EXT_NAME_FIELD)
ext_type = route_ext_feat.GetField(tp_model.ROUTE_EXT_TYPE_FIELD)
exist_r_s_name = \
route_ext_feat.GetField(tp_model.ROUTE_EXT_EXIST_S_NAME_FIELD)
exist_r_l_name = \
route_ext_feat.GetField(tp_model.ROUTE_EXT_EXIST_L_NAME_FIELD)
exist_r_connect_stop_gtfs_id = \
route_ext_feat.GetField(tp_model.ROUTE_EXT_CONNECTING_STOP_FIELD)
exist_r_first_stop_gtfs_id = \
route_ext_feat.GetField(tp_model.ROUTE_EXT_FIRST_STOP_FIELD)
if not exist_r_first_stop_gtfs_id:
exist_r_first_stop_gtfs_id = None
upd_r_short_name = \
route_ext_feat.GetField(tp_model.ROUTE_EXT_UPD_S_NAME_FIELD)
upd_r_long_name = \
route_ext_feat.GetField(tp_model.ROUTE_EXT_UPD_L_NAME_FIELD)
upd_dir_name = \
route_ext_feat.GetField(tp_model.ROUTE_EXT_UPD_DIR_NAME_FIELD)
route_ext_info = Route_Ext_Info(
ext_id, ext_name, ext_type,
exist_r_s_name, exist_r_l_name,
exist_r_connect_stop_gtfs_id, exist_r_first_stop_gtfs_id,
upd_r_short_name, upd_r_long_name, upd_dir_name)
return route_ext_info
def read_route_ext_infos(route_exts_lyr):
route_ext_infos = []
for r_ext_i, route_ext_feat in enumerate(route_exts_lyr):
route_ext_info = route_ext_from_feature(route_ext_feat)
route_ext_infos.append(route_ext_info)
route_exts_lyr.ResetReading()
return route_ext_infos
def get_routes_and_segments(segs_lyr):
all_routes = {}
for feature in segs_lyr:
seg_ref = seg_ref_from_feature(feature)
for route_id in seg_ref.routes:
if route_id not in all_routes:
all_routes[route_id] = [seg_ref]
else:
all_routes[route_id].append(seg_ref)
#for r_id, rsegs in all_routes.iteritems():
# print "For Route ID '%s': segments are %s" % (r_id, rsegs)
segs_lyr.ResetReading()
return all_routes
def get_all_seg_refs(segs_lyr):
all_seg_refs = []
for feature in segs_lyr:
seg_ref = seg_ref_from_feature(feature)
all_seg_refs.append(seg_ref)
segs_lyr.ResetReading()
return all_seg_refs
def create_ordered_seg_refs_from_ids(ordered_seg_ids, segs_lookup_table):
ordered_seg_refs = []
for seg_id in ordered_seg_ids:
seg_feature = segs_lookup_table[seg_id]
seg_ref = seg_ref_from_feature(seg_feature)
ordered_seg_refs.append(seg_ref)
return ordered_seg_refs
def write_seg_ref_to_shp_file(seg_ref, segments_lyr, stop_feat_a, stop_feat_b,
stops_srs, mode_config):
# Create line geometry based on two stops.
seg_geom = tp_model.create_seg_geom_from_stop_pair(stop_feat_a,
stop_feat_b, stops_srs)
seg_ii = tp_model.add_segment(segments_lyr,
seg_ref.seg_id, seg_ref.routes, seg_ref.first_id, seg_ref.second_id,
seg_ref.route_dist_on_seg, seg_geom, mode_config)
seg_geom.Destroy()
return seg_ii
def write_segments_to_shp_file(segments_lyr, input_stops_lyr, seg_refs,
mode_config):
"""Write all segments defined by input seg_refs list to the segments_lyr.
Geometries of segments defined by stop pairs in input_stops_lyr.
"""
print "Writing segment references to shapefile:"
stops_srs = input_stops_lyr.GetSpatialRef()
# Build lookup table by stop ID into stops layer - for speed
stops_lookup_dict = tp_model.build_stops_lookup_table(input_stops_lyr)
for seg_ref in seg_refs:
# look up corresponding stops in lookup table, and build geometry
stop_feat_a = stops_lookup_dict[seg_ref.first_id]
stop_feat_b = stops_lookup_dict[seg_ref.second_id]
seg_ii = write_seg_ref_to_shp_file(seg_ref, segments_lyr,
stop_feat_a, stop_feat_b, stops_srs, mode_config)
print "...done writing."
return
############################
# Route Ext Info processing.
def print_route_ext_infos(route_ext_infos, indent=4):
for re in route_ext_infos:
print " " * indent + "Ext id:%s, '%s', of type %s"\
% (re.ext_id, re.ext_name, re.ext_type)
print " " * indent * 2 + "connects to existing route '%s' "\
"('%s'), at GTFS stop ID %s" \
% (re.exist_r_short_name, re.exist_r_long_name, \
re.exist_r_connect_stop_gtfs_id)
if re.ext_type == tp_model.ROUTE_EXT_TYPE_NEW:
print " " * indent * 2 + "(new route will copy starting from "\
"stop with GTFS ID %s)"\
% (re.exist_r_first_stop_gtfs_id)
print " " * indent * 2 + "will update r name to '%s':'%s' "\
"and new/updated dir name as '%s'." \
% (re.upd_r_short_name, re.upd_r_long_name, \
re.upd_dir_name)
return
def get_matching_existing_route_info(
route_defs, segs_lyr, segs_lookup_table, stops_lyr,
route_ext_info):
# Find the route def, stops, etc of matching route in existing topology
search_route_def = Route_Def(
None,
route_ext_info.exist_r_short_name,
route_ext_info.exist_r_long_name,
None, None)
matching_r_defs = get_matching_route_defs(route_defs,
search_route_def)
if len(matching_r_defs) == 0:
print "Error:- for route extension %s with s name %s, l name %s: "\
"no matching existing routes!" \
% (route_ext_info.ext_name, route_ext_info.exist_r_short_name,\
route_ext_info.exist_r_long_name)
sys.exit(1)
elif len(matching_r_defs) > 1:
print "Error:- for route extension %s with s name %s, l name %s: "\
"matched multiple existing routes!" \
% (route_ext_info.ext_name, route_ext_info.exist_r_short_name,\
route_ext_info.exist_r_long_name)
sys.exit(1)
r_def_to_extend = matching_r_defs[0]
seg_refs_along_route = create_ordered_seg_refs_from_ids(
r_def_to_extend.ordered_seg_ids, segs_lookup_table)
stop_ids_along_route = extract_stop_list_along_route(
seg_refs_along_route)
connect_stop_id = tp_model.get_stop_id_with_gtfs_id(
stops_lyr, route_ext_info.exist_r_connect_stop_gtfs_id)
if connect_stop_id is None:
print "Error:- extension route with connecting stop spec. "\
"with GTFS ID %s :- couldn't find an existing stop with "\
"this GTFS ID."\
% (route_ext_info.exist_r_connect_stop_gtfs_id)
sys.exit()
elif connect_stop_id not in stop_ids_along_route:
print "Error:- extension route with connecting stop spec. "\
"with GTFS ID %s exists, but not found in route to extend." \
% (route_ext_info.exist_r_connect_stop_gtfs_id)
sys.exit()
if route_ext_info.ext_type == tp_model.ROUTE_EXT_TYPE_EXTENSION:
if connect_stop_id == stop_ids_along_route[-1]:
ext_dir_id = 0
elif connect_stop_id == stop_ids_along_route[0]:
ext_dir_id = -1
else:
print "Error:- extension route with connecting stop spec. "\
"with GTFS ID %s not found at end of route to extend."\
% (route_ext_info.exist_r_connect_stop_gtfs_id)
sys.exit(1)
# For new routes, the connecting stop can legitimately be
# anywhere along the route.
orig_route_first_stop_id = tp_model.get_stop_id_with_gtfs_id(
stops_lyr, route_ext_info.exist_r_first_stop_gtfs_id)
return r_def_to_extend, seg_refs_along_route, stop_ids_along_route, \
connect_stop_id, orig_route_first_stop_id
def get_route_infos_to_extend(route_ext_infos, route_defs, segs_lyr,
segs_lookup_table, stops_lyr):
"""Returns the existing_route_infos_to_extend in the form:-
(r_def_to_extend, seg_refs_along_route, stop_ids_along_route,
connect_stop_id)"""
existing_route_infos_to_extend = []
for r_ext_info in route_ext_infos:
route_info_to_extend = get_matching_existing_route_info(
route_defs, segs_lyr, segs_lookup_table, stops_lyr,
r_ext_info)
existing_route_infos_to_extend.append(route_info_to_extend)
return existing_route_infos_to_extend
###############################
# I/O from route definition CSV
# Old (Pre 15 Oct 2014) headers of route_defs.csv
ROUTE_CSV_HEADERS_00 = ['Route', 'dir1', 'dir2', 'Segments']
# New headers:
ROUTE_CSV_HEADERS_01 = ['route_id', 'route_short_name', 'route_long_name',
'gtfs_id', 'dir1', 'dir2', 'Segments']
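# Example (illustrative) data row in the new ';'-delimited format, with the
# ordered segment IDs comma-separated in the final field (values made up):
#   43;R43;None;4343;City->Airport;Airport->City;12,13,14,15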
def read_route_defs(csv_file_name, do_sort=True):
"""Reads a CSV of route_defs, into a list of 'route_defs'.
Each route_def is a dictionary, with following entries:
name: name of route.
directions: a tuple of two strings, the route directions.
segments: a list of (ordered) route segments IDs."""
route_defs = []
try:
csv_file = open(csv_file_name, 'r')
except IOError:
print "Error, route mapping CSV file given, %s , failed to open." \
% (csv_file_name)
sys.exit(1)
dict_reader = csv.DictReader(csv_file, delimiter=';', quotechar="'")
# Check old vs new format
if 'Route' in dict_reader.fieldnames:
format_version = "00"
else:
format_version = "01"
for ii, row in enumerate(dict_reader):
if format_version == "00":
r_id = ii
r_short_name = row['Route']
r_long_name = None
else:
r_id = int(row['route_id'])
r_short_name = row['route_short_name']
if r_short_name == 'None' or len(r_short_name) == 0:
r_short_name = None
r_long_name = row['route_long_name']
if r_long_name == 'None' or len(r_long_name) == 0:
r_long_name = None
assert r_short_name or r_long_name
try:
r_gtfs_id = row['gtfs_id']
if r_gtfs_id == 'None' or len(r_gtfs_id) == 0:
r_gtfs_id = None
except KeyError:
r_gtfs_id = None
dir1 = row['dir1']
dir2 = row['dir2']
segments_str = row['Segments'].split(',')
seg_ids = [int(segstr) for segstr in segments_str]
route_def = Route_Def(r_id, r_short_name, r_long_name,
(dir1, dir2), seg_ids, gtfs_origin_id=r_gtfs_id)
route_defs.append(route_def)
if do_sort == True:
route_defs.sort(key=get_route_order_key_from_name)
csv_file.close()
return route_defs
def write_route_defs(csv_file_name, route_defs):
if sys.version_info >= (3,0,0):
routesfile = open(csv_file_name, 'w', newline='')
else:
routesfile = open(csv_file_name, 'wb')
rwriter = csv.writer(routesfile, delimiter=';')
rwriter.writerow(ROUTE_CSV_HEADERS_01)
for rdef in route_defs:
        # Guard against dir_names being None as well as empty.
        dirs = tuple(rdef.dir_names) if rdef.dir_names else ()
        if not dirs:
            print "Warning:- no dirs listed for route %s to write. "\
                "writing as empty dirs." % rdef.short_name
            dirs = ("", "")
if len(dirs) == 1:
print "Warning:- only one dir listed for route %s to write. "\
"writing other dir as empty." % rdef.short_name
dirs = (dirs[0], "")
seg_str_all = ','.join(map(str, rdef.ordered_seg_ids))
rwriter.writerow([rdef.id, rdef.short_name, rdef.long_name,
rdef.gtfs_origin_id, dirs[0], dirs[1], seg_str_all])
routesfile.close()
print "Wrote output to %s" % (csv_file_name)
return
| PatSunter/SimpleGTFSCreator | route_segs.py | Python | lgpl-3.0 | 52,470 |
# -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2019 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Curses interface class."""
from __future__ import unicode_literals
import sys
from glances.compat import nativestr, u, itervalues, enable, disable
from glances.globals import MACOS, WINDOWS
from glances.logger import logger
from glances.events import glances_events
from glances.processes import glances_processes, sort_processes_key_list
from glances.outputs.glances_unicode import unicode_message
from glances.timer import Timer
# Import curses library for "normal" operating system
try:
import curses
import curses.panel
from curses.textpad import Textbox
except ImportError:
logger.critical("Curses module not found. Glances cannot start in standalone mode.")
if WINDOWS:
logger.critical("For Windows you can try installing windows-curses with pip install.")
sys.exit(1)
class _GlancesCurses(object):
"""This class manages the curses display (and key pressed).
Note: It is a private class, use GlancesCursesClient or GlancesCursesBrowser.
"""
_hotkeys = {
# 'ENTER' > Edit the process filter
'0': {'switch': 'disable_irix'},
'1': {'switch': 'percpu'},
'2': {'switch': 'disable_left_sidebar'},
'3': {'switch': 'disable_quicklook'},
# '4' > Enable or disable quicklook
# '5' > Enable or disable top menu
'6': {'switch': 'meangpu'},
'9': {'switch': 'theme_white'},
'/': {'switch': 'process_short_name'},
'a': {'sort_key': 'auto'},
'A': {'switch': 'disable_amps'},
'b': {'switch': 'byte'},
'B': {'switch': 'diskio_iops'},
'c': {'sort_key': 'cpu_percent'},
'C': {'switch': 'disable_cloud'},
'd': {'switch': 'disable_diskio'},
'D': {'switch': 'disable_docker'},
# 'e' > Enable/Disable process extended
# 'E' > Erase the process filter
# 'f' > Show/hide fs / folder stats
'F': {'switch': 'fs_free_space'},
'g': {'switch': 'generate_graph'},
'G': {'switch': 'disable_gpu'},
'h': {'switch': 'help_tag'},
'i': {'sort_key': 'io_counters'},
'I': {'switch': 'disable_ip'},
# 'k' > Kill selected process
'K': {'switch': 'disable_connections'},
'l': {'switch': 'disable_alert'},
'm': {'sort_key': 'memory_percent'},
'M': {'switch': 'reset_minmax_tag'},
'n': {'switch': 'disable_network'},
'N': {'switch': 'disable_now'},
'p': {'sort_key': 'name'},
'P': {'switch': 'disable_ports'},
# 'q' or ESCAPE > Quit
'Q': {'switch': 'enable_irq'},
'r': {'switch': 'disable_smart'},
'R': {'switch': 'disable_raid'},
's': {'switch': 'disable_sensors'},
'S': {'switch': 'sparkline'},
't': {'sort_key': 'cpu_times'},
'T': {'switch': 'network_sum'},
'u': {'sort_key': 'username'},
'U': {'switch': 'network_cumul'},
# 'w' > Delete finished warning logs
'W': {'switch': 'disable_wifi'},
# 'x' > Delete finished warning and critical logs
# 'z' > Enable or disable processes
# "<" (left arrow) navigation through process sort
# ">" (right arrow) navigation through process sort
# 'UP' > Up in the server list
# 'DOWN' > Down in the server list
}
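    # Adding another toggle would follow the same pattern, e.g. (hypothetical)
    # 'y': {'switch': 'disable_foo'} to flip args.disable_foo on each keypress.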
_sort_loop = sort_processes_key_list
# Define top menu
_top = ['quicklook', 'cpu', 'percpu', 'gpu', 'mem', 'memswap', 'load']
_quicklook_max_width = 68
# Define left sidebar
_left_sidebar = [
'network',
'connections',
'wifi',
'ports',
'diskio',
'fs',
'irq',
'folders',
'raid',
'smart',
'sensors',
'now',
]
_left_sidebar_min_width = 23
_left_sidebar_max_width = 34
# Define right sidebar
_right_sidebar = ['docker', 'processcount', 'amps', 'processlist', 'alert']
def __init__(self, config=None, args=None):
# Init
self.config = config
self.args = args
# Init windows positions
self.term_w = 80
self.term_h = 24
# Space between stats
self.space_between_column = 3
self.space_between_line = 2
# Init the curses screen
self.screen = curses.initscr()
if not self.screen:
logger.critical("Cannot init the curses library.\n")
sys.exit(1)
# Load the 'outputs' section of the configuration file
# - Init the theme (default is black)
self.theme = {'name': 'black'}
# Load configuration file
self.load_config(config)
# Init cursor
self._init_cursor()
# Init the colors
self._init_colors()
# Init main window
self.term_window = self.screen.subwin(0, 0)
# Init edit filter tag
self.edit_filter = False
# Init kill process tag
self.kill_process = False
# Init the process min/max reset
self.args.reset_minmax_tag = False
# Init cursor
self.args.cursor_position = 0
# Catch key pressed with non blocking mode
self.term_window.keypad(1)
self.term_window.nodelay(1)
self.pressedkey = -1
# History tag
self._init_history()
def load_config(self, config):
"""Load the outputs section of the configuration file."""
# Load the theme
if config is not None and config.has_section('outputs'):
logger.debug('Read the outputs section in the configuration file')
self.theme['name'] = config.get_value('outputs', 'curse_theme', default='black')
logger.debug('Theme for the curse interface: {}'.format(self.theme['name']))
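    # Example (illustrative) 'outputs' section of the configuration file that
    # load_config() would pick up, switching to the white theme:
    #   [outputs]
    #   curse_theme=white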
def is_theme(self, name):
"""Return True if the theme *name* should be used."""
return getattr(self.args, 'theme_' + name) or self.theme['name'] == name
def _init_history(self):
"""Init the history option."""
self.reset_history_tag = False
def _init_cursor(self):
"""Init cursors."""
if hasattr(curses, 'noecho'):
curses.noecho()
if hasattr(curses, 'cbreak'):
curses.cbreak()
self.set_cursor(0)
def _init_colors(self):
"""Init the Curses color layout."""
# Set curses options
try:
if hasattr(curses, 'start_color'):
curses.start_color()
logger.debug('Curses interface compatible with {} colors'.format(curses.COLORS))
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
except Exception as e:
logger.warning('Error initializing terminal color ({})'.format(e))
# Init colors
if self.args.disable_bold:
A_BOLD = 0
self.args.disable_bg = True
else:
A_BOLD = curses.A_BOLD
self.title_color = A_BOLD
self.title_underline_color = A_BOLD | curses.A_UNDERLINE
self.help_color = A_BOLD
if curses.has_colors():
# The screen is compatible with a colored design
if self.is_theme('white'):
# White theme: black ==> white
curses.init_pair(1, curses.COLOR_BLACK, -1)
else:
curses.init_pair(1, curses.COLOR_WHITE, -1)
if self.args.disable_bg:
curses.init_pair(2, curses.COLOR_RED, -1)
curses.init_pair(3, curses.COLOR_GREEN, -1)
curses.init_pair(4, curses.COLOR_BLUE, -1)
curses.init_pair(5, curses.COLOR_MAGENTA, -1)
else:
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_RED)
curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_GREEN)
curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(5, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
curses.init_pair(6, curses.COLOR_RED, -1)
curses.init_pair(7, curses.COLOR_GREEN, -1)
curses.init_pair(8, curses.COLOR_BLUE, -1)
# Colors text styles
self.no_color = curses.color_pair(1)
self.default_color = curses.color_pair(3) | A_BOLD
self.nice_color = curses.color_pair(5)
self.cpu_time_color = curses.color_pair(5)
self.ifCAREFUL_color = curses.color_pair(4) | A_BOLD
self.ifWARNING_color = curses.color_pair(5) | A_BOLD
self.ifCRITICAL_color = curses.color_pair(2) | A_BOLD
self.default_color2 = curses.color_pair(7)
self.ifCAREFUL_color2 = curses.color_pair(8) | A_BOLD
self.ifWARNING_color2 = curses.color_pair(5) | A_BOLD
self.ifCRITICAL_color2 = curses.color_pair(6) | A_BOLD
self.filter_color = A_BOLD
self.selected_color = A_BOLD
if curses.COLOR_PAIRS > 8:
colors_list = [curses.COLOR_MAGENTA, curses.COLOR_CYAN, curses.COLOR_YELLOW]
for i in range(0, 3):
try:
curses.init_pair(i + 9, colors_list[i], -1)
except Exception:
if self.is_theme('white'):
curses.init_pair(i + 9, curses.COLOR_BLACK, -1)
else:
curses.init_pair(i + 9, curses.COLOR_WHITE, -1)
self.nice_color = curses.color_pair(9)
self.cpu_time_color = curses.color_pair(9)
self.ifWARNING_color2 = curses.color_pair(9) | A_BOLD
self.filter_color = curses.color_pair(10) | A_BOLD
self.selected_color = curses.color_pair(11) | A_BOLD
else:
# The screen is NOT compatible with a colored design
# switch to B&W text styles
self.no_color = curses.A_NORMAL
self.default_color = curses.A_NORMAL
self.nice_color = A_BOLD
self.cpu_time_color = A_BOLD
self.ifCAREFUL_color = curses.A_UNDERLINE
self.ifWARNING_color = A_BOLD
self.ifCRITICAL_color = curses.A_REVERSE
self.default_color2 = curses.A_NORMAL
self.ifCAREFUL_color2 = curses.A_UNDERLINE
self.ifWARNING_color2 = A_BOLD
self.ifCRITICAL_color2 = curses.A_REVERSE
self.filter_color = A_BOLD
self.selected_color = A_BOLD
# Define the colors list (hash table) for stats
self.colors_list = {
'DEFAULT': self.no_color,
'UNDERLINE': curses.A_UNDERLINE,
'BOLD': A_BOLD,
'SORT': curses.A_UNDERLINE | A_BOLD,
'OK': self.default_color2,
'MAX': self.default_color2 | A_BOLD,
'FILTER': self.filter_color,
'TITLE': self.title_color,
'PROCESS': self.default_color2,
'PROCESS_SELECTED': self.default_color2 | curses.A_UNDERLINE,
'STATUS': self.default_color2,
'NICE': self.nice_color,
'CPU_TIME': self.cpu_time_color,
'CAREFUL': self.ifCAREFUL_color2,
'WARNING': self.ifWARNING_color2,
'CRITICAL': self.ifCRITICAL_color2,
'OK_LOG': self.default_color,
'CAREFUL_LOG': self.ifCAREFUL_color,
'WARNING_LOG': self.ifWARNING_color,
'CRITICAL_LOG': self.ifCRITICAL_color,
'PASSWORD': curses.A_PROTECT,
'SELECTED': self.selected_color,
}
def set_cursor(self, value):
"""Configure the curse cursor appearance.
0: invisible
1: visible
2: very visible
"""
if hasattr(curses, 'curs_set'):
try:
curses.curs_set(value)
except Exception:
pass
def get_key(self, window):
# @TODO: Check issue #163
ret = window.getch()
return ret
def __catch_key(self, return_to_browser=False):
# Catch the pressed key
self.pressedkey = self.get_key(self.term_window)
if self.pressedkey == -1:
return -1
# Actions (available in the global hotkey dict)...
logger.debug("Keypressed (code: {})".format(self.pressedkey))
for hotkey in self._hotkeys:
if self.pressedkey == ord(hotkey) and 'switch' in self._hotkeys[hotkey]:
# Get the option name
# Ex: disable_foo return foo
# enable_foo_bar return foo_bar
option = '_'.join(self._hotkeys[hotkey]['switch'].split('_')[1:])
if self._hotkeys[hotkey]['switch'].startswith('disable_'):
# disable_ switch
if getattr(self.args, self._hotkeys[hotkey]['switch']):
enable(self.args, option)
else:
disable(self.args, option)
elif self._hotkeys[hotkey]['switch'].startswith('enable_'):
# enable_ switch
if getattr(self.args, self._hotkeys[hotkey]['switch']):
disable(self.args, option)
else:
enable(self.args, option)
else:
# Others switchs options (with no enable_ or disable_)
setattr(
self.args,
self._hotkeys[hotkey]['switch'],
not getattr(self.args, self._hotkeys[hotkey]['switch']),
)
if self.pressedkey == ord(hotkey) and 'sort_key' in self._hotkeys[hotkey]:
glances_processes.set_sort_key(
self._hotkeys[hotkey]['sort_key'], self._hotkeys[hotkey]['sort_key'] == 'auto'
)
# Other actions...
if self.pressedkey == ord('\n'):
# 'ENTER' > Edit the process filter
self.edit_filter = not self.edit_filter
elif self.pressedkey == ord('4'):
# '4' > Enable or disable quicklook
self.args.full_quicklook = not self.args.full_quicklook
if self.args.full_quicklook:
self.enable_fullquicklook()
else:
self.disable_fullquicklook()
elif self.pressedkey == ord('5'):
# '5' > Enable or disable top menu
self.args.disable_top = not self.args.disable_top
if self.args.disable_top:
self.disable_top()
else:
self.enable_top()
elif self.pressedkey == ord('9'):
# '9' > Theme from black to white and reverse
self._init_colors()
elif self.pressedkey == ord('e'):
# 'e' > Enable/Disable process extended
self.args.enable_process_extended = not self.args.enable_process_extended
if not self.args.enable_process_extended:
glances_processes.disable_extended()
else:
glances_processes.enable_extended()
elif self.pressedkey == ord('E'):
# 'E' > Erase the process filter
glances_processes.process_filter = None
elif self.pressedkey == ord('f'):
# 'f' > Show/hide fs / folder stats
self.args.disable_fs = not self.args.disable_fs
self.args.disable_folders = not self.args.disable_folders
elif self.pressedkey == ord('k'):
# 'k' > Kill selected process (after confirmation)
self.kill_process = not self.kill_process
elif self.pressedkey == ord('w'):
# 'w' > Delete finished warning logs
glances_events.clean()
elif self.pressedkey == ord('x'):
# 'x' > Delete finished warning and critical logs
glances_events.clean(critical=True)
elif self.pressedkey == ord('z'):
# 'z' > Enable or disable processes
self.args.disable_process = not self.args.disable_process
if self.args.disable_process:
glances_processes.disable()
else:
glances_processes.enable()
elif self.pressedkey == curses.KEY_LEFT:
# "<" (left arrow) navigation through process sort
next_sort = (self.loop_position() - 1) % len(self._sort_loop)
glances_processes.set_sort_key(self._sort_loop[next_sort], False)
elif self.pressedkey == curses.KEY_RIGHT:
# ">" (right arrow) navigation through process sort
next_sort = (self.loop_position() + 1) % len(self._sort_loop)
glances_processes.set_sort_key(self._sort_loop[next_sort], False)
elif self.pressedkey == curses.KEY_UP or self.pressedkey == 65:
# 'UP' > Up in the server list
if self.args.cursor_position > 0:
self.args.cursor_position -= 1
elif self.pressedkey == curses.KEY_DOWN or self.pressedkey == 66:
# 'DOWN' > Down in the server list
# if self.args.cursor_position < glances_processes.max_processes - 2:
if self.args.cursor_position < glances_processes.processes_count:
self.args.cursor_position += 1
elif self.pressedkey == ord('\x1b') or self.pressedkey == ord('q'):
# 'ESC'|'q' > Quit
if return_to_browser:
logger.info("Stop Glances client and return to the browser")
else:
logger.info("Stop Glances (keypressed: {})".format(self.pressedkey))
elif self.pressedkey == curses.KEY_F5:
# "F5" manual refresh requested
pass
# Return the key code
return self.pressedkey
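# __catch_key() above compares the integer returned by getch() against ord(...)
# values; the same dispatch pattern can be isolated without a terminal. The
# handler names below are illustrative only, not part of Glances:
def _dispatch_key_demo(code, handlers):
    """Run the handler registered for a key code, if any, and return the code."""
    action = handlers.get(code)
    if action is not None:
        action()
    return code
_pressed_demo = []
_handlers_demo = {ord('q'): lambda: _pressed_demo.append('quit'),
                  ord('w'): lambda: _pressed_demo.append('clean warnings')}
_dispatch_key_demo(ord('q'), _handlers_demo)
assert _pressed_demo == ['quit']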
def loop_position(self):
"""Return the current sort in the loop"""
for i, v in enumerate(self._sort_loop):
if v == glances_processes.sort_key:
return i
return 0
def disable_top(self):
"""Disable the top panel"""
for p in ['quicklook', 'cpu', 'gpu', 'mem', 'memswap', 'load']:
setattr(self.args, 'disable_' + p, True)
def enable_top(self):
"""Enable the top panel"""
for p in ['quicklook', 'cpu', 'gpu', 'mem', 'memswap', 'load']:
setattr(self.args, 'disable_' + p, False)
def disable_fullquicklook(self):
"""Disable the full quicklook mode"""
for p in ['quicklook', 'cpu', 'gpu', 'mem', 'memswap']:
setattr(self.args, 'disable_' + p, False)
def enable_fullquicklook(self):
"""Disable the full quicklook mode"""
self.args.disable_quicklook = False
for p in ['cpu', 'gpu', 'mem', 'memswap']:
setattr(self.args, 'disable_' + p, True)
def end(self):
"""Shutdown the curses window."""
if hasattr(curses, 'echo'):
curses.echo()
if hasattr(curses, 'nocbreak'):
curses.nocbreak()
if hasattr(curses, 'curs_set'):
try:
curses.curs_set(1)
except Exception:
pass
curses.endwin()
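# end() above restores echo/cbreak, re-enables the cursor and calls endwin() by
# hand. In standalone scripts the same teardown can be delegated to
# curses.wrapper, which restores the terminal even if the callback raises. A
# hedged sketch (needs a real terminal, hence the __main__ guard; the callback
# is illustrative only):
import curses as _curses_wrapper_demo
def _wrapper_demo(stdscr):
    stdscr.addstr(0, 0, "press any key to leave")
    stdscr.refresh()
    stdscr.getch()
if __name__ == "__main__":
    _curses_wrapper_demo.wrapper(_wrapper_demo)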
def init_line_column(self):
"""Init the line and column position for the curses interface."""
self.init_line()
self.init_column()
def init_line(self):
"""Init the line position for the curses interface."""
self.line = 0
self.next_line = 0
def init_column(self):
"""Init the column position for the curses interface."""
self.column = 0
self.next_column = 0
def new_line(self, separator=False):
"""New line in the curses interface."""
self.line = self.next_line
def new_column(self):
"""New column in the curses interface."""
self.column = self.next_column
def separator_line(self, color='TITLE'):
"""New separator line in the curses interface."""
if not self.args.enable_separator:
return
self.new_line()
self.line -= 1
line_width = self.term_window.getmaxyx()[1] - self.column
self.term_window.addnstr(self.line, self.column,
unicode_message('MEDIUM_LINE', self.args) * line_width,
line_width,
self.colors_list[color])
def __get_stat_display(self, stats, layer):
"""Return a dict of dict with all the stats display.
# TODO: Drop extra parameter
:param stats: Global stats dict
:param layer: ~ cs_status
"None": standalone or server mode
"Connected": Client is connected to a Glances server
"SNMP": Client is connected to a SNMP server
"Disconnected": Client is disconnected from the server
:returns: dict of dict
* key: plugin name
* value: dict returned by the get_stats_display Plugin method
"""
ret = {}
for p in stats.getPluginsList(enable=False):
if p == 'quicklook' or p == 'processlist':
# processlist is done later
# because we need to know how many processes could be displayed
continue
# Compute the plugin max size
plugin_max_width = None
if p in self._left_sidebar:
plugin_max_width = max(self._left_sidebar_min_width, self.term_window.getmaxyx()[1] - 105)
plugin_max_width = min(self._left_sidebar_max_width, plugin_max_width)
# Get the view
ret[p] = stats.get_plugin(p).get_stats_display(args=self.args, max_width=plugin_max_width)
return ret
def display(self, stats, cs_status=None):
"""Display stats on the screen.
:param stats: Stats database to display
:param cs_status:
"None": standalone or server mode
"Connected": Client is connected to a Glances server
"SNMP": Client is connected to a SNMP server
"Disconnected": Client is disconnected from the server
        :return: True if the stats have been displayed else False if the help has been displayed
"""
# Init the internal line/column for Glances Curses
self.init_line_column()
# Update the stats messages
###########################
# Get all the plugins but quicklook and process list
self.args.cs_status = cs_status
__stat_display = self.__get_stat_display(stats, layer=cs_status)
# Adapt number of processes to the available space
max_processes_displayed = (
self.term_window.getmaxyx()[0]
- 11
- (0 if 'docker' not in __stat_display else self.get_stats_display_height(__stat_display["docker"]))
- (
0
if 'processcount' not in __stat_display
else self.get_stats_display_height(__stat_display["processcount"])
)
- (0 if 'amps' not in __stat_display else self.get_stats_display_height(__stat_display["amps"]))
- (0 if 'alert' not in __stat_display else self.get_stats_display_height(__stat_display["alert"]))
)
try:
if self.args.enable_process_extended:
max_processes_displayed -= 4
except AttributeError:
pass
if max_processes_displayed < 0:
max_processes_displayed = 0
if glances_processes.max_processes is None or glances_processes.max_processes != max_processes_displayed:
logger.debug("Set number of displayed processes to {}".format(max_processes_displayed))
glances_processes.max_processes = max_processes_displayed
# Get the processlist
__stat_display["processlist"] = stats.get_plugin('processlist').get_stats_display(args=self.args)
# Display the stats on the curses interface
###########################################
# Help screen (on top of the other stats)
if self.args.help_tag:
# Display the stats...
self.display_plugin(stats.get_plugin('help').get_stats_display(args=self.args))
# ... and exit
return False
# =====================================
# Display first line (system+ip+uptime)
# Optionally: Cloud on second line
# =====================================
self.__display_header(__stat_display)
self.separator_line()
# ==============================================================
# Display second line (<SUMMARY>+CPU|PERCPU+<GPU>+LOAD+MEM+SWAP)
# ==============================================================
self.__display_top(__stat_display, stats)
self.init_column()
self.separator_line()
# ==================================================================
# Display left sidebar (NETWORK+PORTS+DISKIO+FS+SENSORS+Current time)
# ==================================================================
self.__display_left(__stat_display)
# ====================================
# Display right stats (process and co)
# ====================================
self.__display_right(__stat_display)
# =====================
# Others popup messages
# =====================
# Display edit filter popup
# Only in standalone mode (cs_status is None)
if self.edit_filter and cs_status is None:
new_filter = self.display_popup(
'Process filter pattern: \n\n'
+ 'Examples:\n'
+ '- python\n'
+ '- .*python.*\n'
+ '- /usr/lib.*\n'
+ '- name:.*nautilus.*\n'
+ '- cmdline:.*glances.*\n'
+ '- username:nicolargo\n'
+ '- username:^root ',
popup_type='input',
input_value=glances_processes.process_filter_input,
)
glances_processes.process_filter = new_filter
elif self.edit_filter and cs_status is not None:
self.display_popup('Process filter only available in standalone mode')
self.edit_filter = False
# Display kill process confirmation popup
# Only in standalone mode (cs_status is None)
if self.kill_process and cs_status is None:
selected_process_raw = stats.get_plugin('processlist').get_raw()[self.args.cursor_position]
confirm = self.display_popup(
'Kill process: {} (pid: {}) ?\n\nConfirm ([y]es/[n]o): '.format(
selected_process_raw['name'], selected_process_raw['pid']
),
popup_type='yesno',
)
if confirm.lower().startswith('y'):
try:
ret_kill = glances_processes.kill(selected_process_raw['pid'])
except Exception as e:
logger.error('Can not kill process {} ({})'.format(selected_process_raw['name'], e))
else:
logger.info(
'Kill signal has been sent to process {} (return code: {})'.format(
selected_process_raw['name'], ret_kill
)
)
elif self.kill_process and cs_status is not None:
self.display_popup('Kill process only available in standalone mode')
self.kill_process = False
# Display graph generation popup
if self.args.generate_graph:
self.display_popup('Generate graph in {}'.format(self.args.export_graph_path))
return True
def __display_header(self, stat_display):
"""Display the firsts lines (header) in the Curses interface.
system + ip + uptime
(cloud)
"""
# First line
self.new_line()
self.space_between_column = 0
l_uptime = 1
for i in ['system', 'ip', 'uptime']:
if i in stat_display:
l_uptime += self.get_stats_display_width(stat_display[i])
self.display_plugin(stat_display["system"], display_optional=(self.term_window.getmaxyx()[1] >= l_uptime))
self.space_between_column = 3
if 'ip' in stat_display:
self.new_column()
self.display_plugin(stat_display["ip"])
self.new_column()
self.display_plugin(
stat_display["uptime"],
add_space=-(self.get_stats_display_width(stat_display["cloud"]) != 0)
)
self.init_column()
if self.get_stats_display_width(stat_display["cloud"]) != 0:
# Second line (optional)
self.new_line()
self.display_plugin(stat_display["cloud"])
def __display_top(self, stat_display, stats):
"""Display the second line in the Curses interface.
<QUICKLOOK> + CPU|PERCPU + <GPU> + MEM + SWAP + LOAD
"""
self.init_column()
self.new_line()
# Init quicklook
stat_display['quicklook'] = {'msgdict': []}
# Dict for plugins width
plugin_widths = {}
for p in self._top:
plugin_widths[p] = (
self.get_stats_display_width(stat_display.get(p, 0)) if hasattr(self.args, 'disable_' + p) else 0
)
# Width of all plugins
stats_width = sum(itervalues(plugin_widths))
        # Number of plugins except quicklook
stats_number = sum(
[int(stat_display[p]['msgdict'] != []) for p in self._top if not getattr(self.args, 'disable_' + p)]
)
if not self.args.disable_quicklook:
            # Quicklook is in place!
if self.args.full_quicklook:
quicklook_width = self.term_window.getmaxyx()[1] - (
stats_width + 8 + stats_number * self.space_between_column
)
else:
quicklook_width = min(
self.term_window.getmaxyx()[1] - (stats_width + 8 + stats_number * self.space_between_column),
self._quicklook_max_width - 5,
)
try:
stat_display["quicklook"] = stats.get_plugin('quicklook').get_stats_display(
max_width=quicklook_width, args=self.args
)
except AttributeError as e:
logger.debug("Quicklook plugin not available (%s)" % e)
else:
plugin_widths['quicklook'] = self.get_stats_display_width(stat_display["quicklook"])
stats_width = sum(itervalues(plugin_widths)) + 1
self.space_between_column = 1
self.display_plugin(stat_display["quicklook"])
self.new_column()
# Compute spaces between plugins
# Note: Only one space between Quicklook and others
plugin_display_optional = {}
for p in self._top:
plugin_display_optional[p] = True
if stats_number > 1:
self.space_between_column = max(1, int((self.term_window.getmaxyx()[1] - stats_width) / (stats_number - 1)))
for p in ['mem', 'cpu']:
# No space ? Remove optional stats
if self.space_between_column < 3:
plugin_display_optional[p] = False
plugin_widths[p] = (
self.get_stats_display_width(stat_display[p], without_option=True)
if hasattr(self.args, 'disable_' + p)
else 0
)
stats_width = sum(itervalues(plugin_widths)) + 1
self.space_between_column = max(
1, int((self.term_window.getmaxyx()[1] - stats_width) / (stats_number - 1))
)
else:
self.space_between_column = 0
# Display CPU, MEM, SWAP and LOAD
for p in self._top:
if p == 'quicklook':
continue
if p in stat_display:
self.display_plugin(stat_display[p], display_optional=plugin_display_optional[p])
if p != 'load':
# Skip last column
self.new_column()
# Space between column
self.space_between_column = 3
# Backup line position
self.saved_line = self.next_line
def __display_left(self, stat_display):
"""Display the left sidebar in the Curses interface."""
self.init_column()
if self.args.disable_left_sidebar:
return
for p in self._left_sidebar:
if (hasattr(self.args, 'enable_' + p) or hasattr(self.args, 'disable_' + p)) and p in stat_display:
self.new_line()
self.display_plugin(stat_display[p])
def __display_right(self, stat_display):
"""Display the right sidebar in the Curses interface.
docker + processcount + amps + processlist + alert
"""
# Do not display anything if space is not available...
if self.term_window.getmaxyx()[1] < self._left_sidebar_min_width:
return
# Restore line position
self.next_line = self.saved_line
# Display right sidebar
self.new_column()
for p in self._right_sidebar:
if (hasattr(self.args, 'enable_' + p) or hasattr(self.args, 'disable_' + p)) and p in stat_display:
                if p not in stat_display:
# Catch for issue #1470
continue
self.new_line()
if p == 'processlist':
self.display_plugin(
stat_display['processlist'],
display_optional=(self.term_window.getmaxyx()[1] > 102),
display_additional=(not MACOS),
max_y=(
self.term_window.getmaxyx()[0] - self.get_stats_display_height(stat_display['alert']) - 2
),
)
else:
self.display_plugin(stat_display[p])
def display_popup(
self, message, size_x=None, size_y=None, duration=3, popup_type='info', input_size=30, input_value=None
):
"""
Display a centered popup.
        popup_type='info'
Just an information popup, no user interaction
Display a centered popup with the given message during duration seconds
If size_x and size_y: set the popup size
else set it automatically
Return True if the popup could be displayed
popup_type='input'
Display a centered popup with the given message and a input field
If size_x and size_y: set the popup size
else set it automatically
Return the input string or None if the field is empty
popup_type='yesno'
Display a centered popup with the given message
If size_x and size_y: set the popup size
else set it automatically
Return True (yes) or False (no)
"""
# Center the popup
sentence_list = message.split('\n')
if size_x is None:
size_x = len(max(sentence_list, key=len)) + 4
# Add space for the input field
if popup_type == 'input':
size_x += input_size
if size_y is None:
size_y = len(sentence_list) + 4
screen_x = self.term_window.getmaxyx()[1]
screen_y = self.term_window.getmaxyx()[0]
if size_x > screen_x or size_y > screen_y:
            # Not enough space to display the popup => abort
return False
pos_x = int((screen_x - size_x) / 2)
pos_y = int((screen_y - size_y) / 2)
# Create the popup
popup = curses.newwin(size_y, size_x, pos_y, pos_x)
# Fill the popup
popup.border()
# Add the message
for y, m in enumerate(sentence_list):
popup.addnstr(2 + y, 2, m, len(m))
if popup_type == 'info':
# Display the popup
popup.refresh()
self.wait(duration * 1000)
return True
elif popup_type == 'input':
# Create a sub-window for the text field
sub_pop = popup.derwin(1, input_size, 2, 2 + len(m))
sub_pop.attron(self.colors_list['FILTER'])
# Init the field with the current value
if input_value is not None:
sub_pop.addnstr(0, 0, input_value, len(input_value))
# Display the popup
popup.refresh()
sub_pop.refresh()
# Create the textbox inside the sub-windows
self.set_cursor(2)
self.term_window.keypad(1)
textbox = GlancesTextbox(sub_pop, insert_mode=True)
textbox.edit()
self.set_cursor(0)
# self.term_window.keypad(0)
if textbox.gather() != '':
logger.debug("User enters the following string: %s" % textbox.gather())
return textbox.gather()[:-1]
else:
logger.debug("User centers an empty string")
return None
elif popup_type == 'yesno':
            # Create a sub-window for the yes/no field
sub_pop = popup.derwin(1, 2, len(sentence_list) + 1, len(m) + 2)
sub_pop.attron(self.colors_list['FILTER'])
# Init the field with the current value
sub_pop.addnstr(0, 0, '', 0)
# Display the popup
popup.refresh()
sub_pop.refresh()
# Create the textbox inside the sub-windows
self.set_cursor(2)
self.term_window.keypad(1)
textbox = GlancesTextboxYesNo(sub_pop, insert_mode=False)
textbox.edit()
self.set_cursor(0)
# self.term_window.keypad(0)
return textbox.gather()
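# display_popup() centres the window by subtracting the popup size from the
# screen size and halving the remainder. The same arithmetic in isolation,
# with arbitrary sample sizes and no curses objects involved:
def _center_window_demo(screen_y, screen_x, size_y, size_x):
    """Return (pos_y, pos_x) for a centred window, or None if it cannot fit."""
    if size_x > screen_x or size_y > screen_y:
        return None
    return (screen_y - size_y) // 2, (screen_x - size_x) // 2
assert _center_window_demo(24, 80, 10, 40) == (7, 20)
assert _center_window_demo(24, 80, 30, 40) is None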
def display_plugin(self, plugin_stats, display_optional=True, display_additional=True, max_y=65535, add_space=0):
"""Display the plugin_stats on the screen.
:param plugin_stats:
:param display_optional: display the optional stats if True
:param display_additional: display additional stats if True
:param max_y: do not display line > max_y
:param add_space: add x space (line) after the plugin
"""
# Exit if:
# - the plugin_stats message is empty
# - the display tag = False
if plugin_stats is None or not plugin_stats['msgdict'] or not plugin_stats['display']:
# Exit
return 0
# Get the screen size
screen_x = self.term_window.getmaxyx()[1]
screen_y = self.term_window.getmaxyx()[0]
# Set the upper/left position of the message
if plugin_stats['align'] == 'right':
# Right align (last column)
display_x = screen_x - self.get_stats_display_width(plugin_stats)
else:
display_x = self.column
if plugin_stats['align'] == 'bottom':
# Bottom (last line)
display_y = screen_y - self.get_stats_display_height(plugin_stats)
else:
display_y = self.line
# Display
x = display_x
x_max = x
y = display_y
for m in plugin_stats['msgdict']:
# New line
try:
if m['msg'].startswith('\n'):
# Go to the next line
y += 1
# Return to the first column
x = display_x
continue
except:
# Avoid exception (see issue #1692)
pass
# Do not display outside the screen
if x < 0:
continue
if not m['splittable'] and (x + len(m['msg']) > screen_x):
continue
if y < 0 or (y + 1 > screen_y) or (y > max_y):
break
# If display_optional = False do not display optional stats
if not display_optional and m['optional']:
continue
# If display_additional = False do not display additional stats
if not display_additional and m['additional']:
continue
# Is it possible to display the stat with the current screen size
# !!! Crash if not try/except... Why ???
try:
self.term_window.addnstr(
y,
x,
m['msg'],
# Do not display outside the screen
screen_x - x,
self.colors_list[m['decoration']],
)
except Exception:
pass
else:
# New column
# Python 2: we need to decode to get real screen size because
# UTF-8 special tree chars occupy several bytes.
# Python 3: strings are strings and bytes are bytes, all is
# good.
try:
x += len(u(m['msg']))
except UnicodeDecodeError:
# Quick and dirty hack for issue #745
pass
if x > x_max:
x_max = x
# Compute the next Glances column/line position
self.next_column = max(self.next_column, x_max + self.space_between_column)
self.next_line = max(self.next_line, y + self.space_between_line)
# Have empty lines after the plugins
self.next_line += add_space
def erase(self):
"""Erase the content of the screen."""
self.term_window.erase()
def flush(self, stats, cs_status=None):
"""Clear and update the screen.
:param stats: Stats database to display
:param cs_status:
"None": standalone or server mode
"Connected": Client is connected to the server
"Disconnected": Client is disconnected from the server
"""
self.erase()
self.display(stats, cs_status=cs_status)
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
"""Update the screen.
:param stats: Stats database to display
:param duration: duration of the loop
:param cs_status:
"None": standalone or server mode
"Connected": Client is connected to the server
"Disconnected": Client is disconnected from the server
:param return_to_browser:
            True: Do not exit, return to the browser list
False: Exit and return to the shell
:return: True if exit key has been pressed else False
"""
# Flush display
self.flush(stats, cs_status=cs_status)
# If the duration is < 0 (update + export time > refresh_time)
# Then display the interface and log a message
if duration <= 0:
logger.warning('Update and export time higher than refresh_time.')
duration = 0.1
# Wait duration (in s) time
isexitkey = False
countdown = Timer(duration)
# Set the default timeout (in ms) between two getch
self.term_window.timeout(100)
while not countdown.finished() and not isexitkey:
# Getkey
pressedkey = self.__catch_key(return_to_browser=return_to_browser)
isexitkey = pressedkey == ord('\x1b') or pressedkey == ord('q')
if pressedkey == curses.KEY_F5:
                # We were asked to refresh (F5)
return isexitkey
if isexitkey and self.args.help_tag:
# Quit from help should return to main screen, not exit #1874
self.args.help_tag = not self.args.help_tag
isexitkey = False
return isexitkey
if not isexitkey and pressedkey > -1:
# Redraw display
self.flush(stats, cs_status=cs_status)
# Overwrite the timeout with the countdown
self.wait(delay=int(countdown.get() * 1000))
return isexitkey
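# update() above keeps polling getch() with a 100 ms timeout until a countdown
# expires, redrawing only when a key arrives. A terminal-free sketch of that
# polling structure, with time.monotonic standing in for the Glances Timer and
# a stubbed key source (both substitutions are assumptions for illustration):
import time as _time_poll_demo
def _poll_until_demo(duration, get_key, on_key, step=0.05):
    """Poll get_key() every `step` seconds until `duration` seconds have passed."""
    deadline = _time_poll_demo.monotonic() + duration
    while _time_poll_demo.monotonic() < deadline:
        key = get_key()
        if key != -1:
            on_key(key)
        _time_poll_demo.sleep(step)
_seen_keys_demo = []
_poll_until_demo(0.1, lambda: ord('r'), _seen_keys_demo.append)
assert _seen_keys_demo and _seen_keys_demo[0] == ord('r')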
def wait(self, delay=100):
"""Wait delay in ms"""
        curses.napms(delay)
def get_stats_display_width(self, curse_msg, without_option=False):
"""Return the width of the formatted curses message."""
try:
if without_option:
# Size without options
c = len(
max(
''.join(
[
(u(u(nativestr(i['msg'])).encode('ascii', 'replace')) if not i['optional'] else "")
for i in curse_msg['msgdict']
]
).split('\n'),
key=len,
)
)
else:
# Size with all options
c = len(
max(
''.join(
[u(u(nativestr(i['msg'])).encode('ascii', 'replace')) for i in curse_msg['msgdict']]
).split('\n'),
key=len,
)
)
except Exception as e:
logger.debug('ERROR: Can not compute plugin width ({})'.format(e))
return 0
else:
return c
def get_stats_display_height(self, curse_msg):
"""Return the height of the formatted curses message.
The height is defined by the number of '\n' (new line).
"""
try:
c = [i['msg'] for i in curse_msg['msgdict']].count('\n')
except Exception as e:
logger.debug('ERROR: Can not compute plugin height ({})'.format(e))
return 0
else:
return c + 1
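# get_stats_display_width()/_height() above derive a plugin's rendered size from
# its 'msgdict': width is the longest line after joining the messages, height is
# the number of '\n' entries plus one. The same computation on a tiny hand-made
# msgdict (pure Python, no curses needed):
_msgdict_demo = [{'msg': 'CPU  12.3%'}, {'msg': '\n'}, {'msg': 'user  5.0%'}]
_joined_demo = ''.join(m['msg'] for m in _msgdict_demo)
_width_demo = len(max(_joined_demo.split('\n'), key=len))
_height_demo = [m['msg'] for m in _msgdict_demo].count('\n') + 1
assert (_width_demo, _height_demo) == (10, 2)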
class GlancesCursesStandalone(_GlancesCurses):
"""Class for the Glances curse standalone."""
pass
class GlancesCursesClient(_GlancesCurses):
"""Class for the Glances curse client."""
pass
class GlancesTextbox(Textbox, object):
def __init__(self, *args, **kwargs):
super(GlancesTextbox, self).__init__(*args, **kwargs)
def do_command(self, ch):
if ch == 10: # Enter
return 0
if ch == 127: # Back
return 8
return super(GlancesTextbox, self).do_command(ch)
class GlancesTextboxYesNo(Textbox, object):
def __init__(self, *args, **kwargs):
super(GlancesTextboxYesNo, self).__init__(*args, **kwargs)
def do_command(self, ch):
return super(GlancesTextboxYesNo, self).do_command(ch)
| nicolargo/glances | glances/outputs/glances_curses.py | Python | lgpl-3.0 | 47,546 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import pytest
from numpy import isclose
from dolfin import (assemble, dx, Function, FunctionSpace, grad, inner, solve, TestFunction, TrialFunction,
UnitSquareMesh)
from rbnics.backends import LinearSolver as FactoryLinearSolver
from rbnics.backends.dolfin import LinearSolver as DolfinLinearSolver
from test_dolfin_utils import RandomDolfinFunction
LinearSolver = None
AllLinearSolver = {"dolfin": DolfinLinearSolver, "factory": FactoryLinearSolver}
class Data(object):
def __init__(self, Th, callback_type):
# Create mesh and define function space
mesh = UnitSquareMesh(Th, Th)
self.V = FunctionSpace(mesh, "Lagrange", 1)
# Define variational problem
u = TrialFunction(self.V)
v = TestFunction(self.V)
self.a = inner(grad(u), grad(v)) * dx + inner(u, v) * dx
self.f = lambda g: g * v * dx
# Define callback function depending on callback type
assert callback_type in ("form callbacks", "tensor callbacks")
if callback_type == "form callbacks":
def callback(arg):
return arg
elif callback_type == "tensor callbacks":
def callback(arg):
return assemble(arg)
self.callback_type = callback_type
self.callback = callback
def generate_random(self):
# Generate random rhs
g = RandomDolfinFunction(self.V)
# Return
return (self.a, self.f(g))
def evaluate_builtin(self, a, f):
a = self.callback(a)
f = self.callback(f)
result_builtin = Function(self.V)
if self.callback_type == "form callbacks":
solve(a == f, result_builtin, solver_parameters={"linear_solver": "mumps"})
elif self.callback_type == "tensor callbacks":
solve(a, result_builtin.vector(), f, "mumps")
return result_builtin
def evaluate_backend(self, a, f):
a = self.callback(a)
f = self.callback(f)
result_backend = Function(self.V)
solver = LinearSolver(a, result_backend, f)
solver.set_parameters({
"linear_solver": "mumps"
})
solver.solve()
return result_backend
def assert_backend(self, a, f, result_backend):
result_builtin = self.evaluate_builtin(a, f)
error = Function(self.V)
error.vector().add_local(+ result_backend.vector().get_local())
error.vector().add_local(- result_builtin.vector().get_local())
error.vector().apply("add")
relative_error = error.vector().norm("l2") / result_builtin.vector().norm("l2")
assert isclose(relative_error, 0., atol=1e-12)
@pytest.mark.parametrize("Th", [2**i for i in range(3, 9)])
@pytest.mark.parametrize("callback_type", ["form callbacks", "tensor callbacks"])
@pytest.mark.parametrize("test_type", ["builtin"] + list(AllLinearSolver.keys()))
def test_dolfin_linear_solver(Th, callback_type, test_type, benchmark):
data = Data(Th, callback_type)
print("Th = " + str(Th) + ", Nh = " + str(data.V.dim()))
if test_type == "builtin":
print("Testing " + test_type + ", callback_type = " + callback_type)
benchmark(data.evaluate_builtin, setup=data.generate_random)
else:
print("Testing " + test_type + " backend" + ", callback_type = " + callback_type)
global LinearSolver
LinearSolver = AllLinearSolver[test_type]
benchmark(data.evaluate_backend, setup=data.generate_random, teardown=data.assert_backend)
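# The test above parametrises over mesh sizes and callback/backend types and
# hands the work to a `benchmark` fixture that times each call (pytest-benchmark
# provides such a fixture). A self-contained sketch of the bare pattern, without
# the FEniCS dependencies (the timed function is a stand-in, not part of RBniCS):
import pytest as _pytest_demo
@_pytest_demo.mark.parametrize("n", [10, 100])
def test_sum_benchmark_demo(n, benchmark):
    data = list(range(n))
    result = benchmark(sum, data)
    assert result == n * (n - 1) // 2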
| mathLab/RBniCS | tests/performance/backends/dolfin/test_dolfin_linear_solver.py | Python | lgpl-3.0 | 3,651 |
# -*- coding: utf-8 -*-
import re
from DelogX.utils.i18n import I18n
from DelogX.utils.path import Path
from DelogX.utils.plugin import Plugin
class DelogReadMore(Plugin):
i18n = None
def run(self):
conf = self.blog.default_conf
self.i18n = I18n(
Path.format_url(self.workspace, 'locale'), conf('local.locale'))
self.manager.add_action('dx_post_update', self.parse_readmore)
def parse_readmore(self, post):
if not post:
return
content_split = re.split(r'<[Hh][Rr](?:\s+\/)?>', post.content, 1)
if len(content_split) == 2:
summary, more = content_split
else:
summary = content_split[0]
more = ''
post_url = self.blog.runtime.get('url_prefix.post')
post_url = Path.format_url(post_url, Path.urlencode(post.url))
content = '''{0}
<div class="{1}"><a href="{2}">{3}</a></div>
<div class="post-more">{4}</div>
'''
more_class = ['read-more']
if not more:
more_class.append('no-more-content')
more_class = ' '.join(more_class)
content = content.format(
summary, more_class, post_url, self.i18n.get('Read More'), more)
post.content = content
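# parse_readmore() splits the rendered HTML on the first <hr> tag
# (case-insensitive, optionally self-closing with a space before the slash) and
# treats everything before it as the summary. The regular expression in
# isolation:
import re as _re_hr_demo
_hr_demo = _re_hr_demo.compile(r'<[Hh][Rr](?:\s+\/)?>')
assert _hr_demo.split('<p>intro</p><hr /><p>rest</p>', 1) == ['<p>intro</p>', '<p>rest</p>']
assert _hr_demo.split('<p>no break</p>', 1) == ['<p>no break</p>']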
| deluxghost/DelogX | DelogX/defaults/plugins/delog_readmore/__init__.py | Python | lgpl-3.0 | 1,320 |
# Generated by Django 2.0.8 on 2018-11-05 20:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tickets', '0008_auto_20180730_2035'),
]
operations = [
migrations.AlterModelOptions(
name='ticket',
options={'default_permissions': ('add', 'change', 'delete', 'view')},
),
]
| IntegratedAlarmSystem-Group/ias-webserver | tickets/migrations/0009_auto_20181105_2039.py | Python | lgpl-3.0 | 383 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table(u'core_category', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=75)),
))
db.send_create_signal(u'core', ['Category'])
# Adding model 'Source'
db.create_table(u'core_source', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('type', self.gf('django.db.models.fields.CharField')(max_length=20)),
('title', self.gf('django.db.models.fields.CharField')(max_length=75)),
('author', self.gf('django.db.models.fields.CharField')(max_length=75)),
('year_published', self.gf('django.db.models.fields.PositiveIntegerField')()),
('more_info', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('series_season', self.gf('django.db.models.fields.PositiveIntegerField')()),
('series_episode', self.gf('django.db.models.fields.PositiveIntegerField')()),
('description_E', self.gf('django.db.models.fields.TextField')(max_length=300)),
('description_D', self.gf('django.db.models.fields.TextField')(max_length=300)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('image_credit', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)),
))
db.send_create_signal(u'core', ['Source'])
# Adding model 'Prediction'
db.create_table(u'core_prediction', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Source'])),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Category'])),
('description_E', self.gf('django.db.models.fields.TextField')(max_length=300)),
('description_D', self.gf('django.db.models.fields.TextField')(max_length=300)),
('year_predicted', self.gf('django.db.models.fields.PositiveIntegerField')()),
('more_info', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('headline_E', self.gf('django.db.models.fields.TextField')(max_length=300)),
('headline_D', self.gf('django.db.models.fields.TextField')(max_length=300)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('image_credit', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)),
('username', self.gf('django.db.models.fields.CharField')(max_length=75)),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('edition_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('published', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'core', ['Prediction'])
# Adding model 'Realisation'
db.create_table(u'core_realisation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('prediction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Prediction'])),
('description_E', self.gf('django.db.models.fields.TextField')(max_length=300)),
('description_D', self.gf('django.db.models.fields.TextField')(max_length=300)),
('year_introduced', self.gf('django.db.models.fields.PositiveIntegerField')()),
('more_info', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('image_credit', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('edition_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('published', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'core', ['Realisation'])
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table(u'core_category')
# Deleting model 'Source'
db.delete_table(u'core_source')
# Deleting model 'Prediction'
db.delete_table(u'core_prediction')
# Deleting model 'Realisation'
db.delete_table(u'core_realisation')
models = {
u'core.category': {
'Meta': {'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '75'})
},
u'core.prediction': {
'Meta': {'object_name': 'Prediction'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Category']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description_D': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'description_E': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'edition_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'headline_D': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'headline_E': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'image_credit': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
'more_info': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Source']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'year_predicted': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'core.realisation': {
'Meta': {'object_name': 'Realisation'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description_D': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'description_E': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'edition_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'image_credit': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
'more_info': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'prediction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Prediction']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'year_introduced': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'core.source': {
'Meta': {'object_name': 'Source'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'description_D': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'description_E': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'image_credit': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
'more_info': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'series_episode': ('django.db.models.fields.PositiveIntegerField', [], {}),
'series_season': ('django.db.models.fields.PositiveIntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'year_published': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
complete_apps = ['core'] | jplusplus/dystopia-tracker | app/core/migrations/0001_initial.py | Python | lgpl-3.0 | 9,322 |
from msaf.models import dbsession, Sample, Marker, Batch
from msaf.lib.analytics import SampleSet
from itertools import cycle
import yaml
def load_yaml(yaml_text):
d = yaml.load( yaml_text )
instances = {}
for k in d:
if k == 'selector':
instances['selector'] = Selector.from_dict( d[k] )
elif k == 'filter':
instances['filter'] = Filter.from_dict( d[k] )
elif k == 'differentiation':
instances['differentiation'] = Differentiation.from_dict( d[k] )
else:
raise RuntimeError()
return instances
def save_yaml( instances ):
# we don't really need to save to YAML yet
pass
colours = cycle( [ 'red', 'green', 'blue', 'orange', 'purple', 'black', 'magenta',
'wheat', 'cyan', 'brown', 'slateblue', 'lightgreen' ] )
class Selector(object):
def __init__(self, samples = [], markers = []):
self.samples = []
self.markers = []
@staticmethod
def from_dict(d):
selector = Selector()
selector.samples = d['samples']
selector.markers = d['markers']
return selector
def to_dict(self):
return { 'samples': self.samples, 'markers': self.markers }
@staticmethod
def load(yaml_text):
d = yaml.load( yaml_text )
selector = Selector.from_dict( d )
return selector
def dump(self):
d = self.to_dict()
return yaml.dump( d )
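# Selector keeps its state as plain dicts precisely so it can round-trip through
# YAML via from_dict()/to_dict(). The shape of that round trip with PyYAML,
# independent of the msaf models (safe_load is used in this sketch; the module
# above calls yaml.load on its own analysis files):
import yaml as _yaml_demo
_yaml_text_demo = 'samples:\n  set1:\n  - codes: [S1, S2]\n    batch: b2015\nmarkers: [msp1]\n'
_parsed_demo = _yaml_demo.safe_load(_yaml_text_demo)
assert _parsed_demo['markers'] == ['msp1']
assert _parsed_demo['samples']['set1'][0]['codes'] == ['S1', 'S2']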
def get_sample_ids(self, db):
""" return sample ids; db is SQLa dbsession handler """
pass
def get_marker_ids(self):
""" return marker ids; db is SQLa dbsession handler """
# self.markers is name
markers = [ Marker.search(name) for name in self.markers ]
return [ marker.id for marker in markers ]
def get_sample_sets(self, db=None):
if not db:
db = dbsession
sample_set = []
for label in self.samples:
if label == '__ALL__':
# single query
pass
sample_ids = []
sample_selector = self.samples[label]
for spec in sample_selector:
if 'query' in spec:
if '$' in spec['query']:
                        raise RuntimeError('query must not be an advanced one')
if 'batch' in spec:
query = spec['batch'] + '[batch] & (' + spec['query'] + ')'
elif 'codes' in spec:
batch = Batch.search(spec['batch'])
q = dbsession.query( Sample.id ).join( Batch ).filter( Batch.id == batch.id).filter( Sample.code.in_( spec['codes'] ) )
sample_ids += list( q )
if label == '__ALL__':
label = '-'
sample_set.append( SampleSet( location = '', year = 0,
label = label,
colour = next(colours),
sample_ids = sample_ids ) )
return sample_set
class Filter(object):
def __init__(self):
self.abs_threshold = 0
self.rel_threshold = 0
self.rel_cutoff = 0
self.sample_qual_threshold = 0
self.marker_qual_threshold = 0
self.sample_options = None
@staticmethod
def from_dict(d):
filter_params = Filter()
filter_params.abs_threshold = int( d['abs_threshold'] )
filter_params.rel_threshold = float( d['rel_threshold'] )
filter_params.rel_cutoff = float( d['rel_cutoff'] )
filter_params.sample_qual_threshold = float( d['sample_qual_threshold'] )
filter_params.marker_qual_threshold = float( d['marker_qual_threshold'] )
filter_params.sample_option = d['sample_option']
return filter_params
def to_dict(self):
pass
@staticmethod
def load(yaml_text):
pass
def dump(self):
pass
class Differentiation(object):
def __init__(self):
self.spatial = 0
self.temporal = 0
self.differentiation = 0
@staticmethod
def from_dict(d):
differentiation = Differentiation()
differentiation.spatial = d['spatial']
differentiation.temporal = d['temporal']
differentiation.detection = d['detection']
return differentiation
def to_dict(self):
pass
@staticmethod
def load(yaml_text):
pass
def dump(self):
pass
def create_group( selector ):
pass
| trmznt/msaf | msaf/lib/queryutils.py | Python | lgpl-3.0 | 4,648 |
#!/usr/bin/env python
# pylint: disable=R0903
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2021
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram Message Parse Modes."""
from typing import ClassVar
from telegram import constants
from telegram.utils.deprecate import set_new_attribute_deprecated
class ParseMode:
"""This object represents a Telegram Message Parse Modes."""
__slots__ = ('__dict__',)
MARKDOWN: ClassVar[str] = constants.PARSEMODE_MARKDOWN
""":const:`telegram.constants.PARSEMODE_MARKDOWN`\n
Note:
:attr:`MARKDOWN` is a legacy mode, retained by Telegram for backward compatibility.
You should use :attr:`MARKDOWN_V2` instead.
"""
MARKDOWN_V2: ClassVar[str] = constants.PARSEMODE_MARKDOWN_V2
""":const:`telegram.constants.PARSEMODE_MARKDOWN_V2`"""
HTML: ClassVar[str] = constants.PARSEMODE_HTML
""":const:`telegram.constants.PARSEMODE_HTML`"""
def __setattr__(self, key: str, value: object) -> None:
set_new_attribute_deprecated(self, key, value)
| leandrotoledo/python-telegram-bot | telegram/parsemode.py | Python | lgpl-3.0 | 1,783 |
# -*- coding: utf-8 -*-
"""
Base version of package/tasks.py, created by version 0.3.0 of
package/root/dir> dk-tasklib install
(it should reside in the root directory of your package)
This file defines tasks for the Invoke tool: http://www.pyinvoke.org
Basic usage::
inv -l # list all available tasks
inv build -f # build everything, forcefully
inv build --docs # only build the docs
dk-tasklib is a library of basic tasks that tries to automate common tasks.
dk-tasklib will attempt to install any tools/libraries/etc. that are required,
e.g. when running the task to compile x.less to x.css, it will check that
the lessc compiler is installed (and if not it will attempt to install it).
This file is an initial skeleton, you are supposed to edit and add to it so it
will fit your use case.
"""
# pragma: nocover
from __future__ import print_function
import os
import warnings
from dkfileutils.changed import changed
from dkfileutils.path import Path
from dktasklib.wintask import task
from invoke import Collection
from dktasklib import docs as doctools
from dktasklib import jstools
from dktasklib import lessc
from dktasklib import version, upversion
from dktasklib.manage import collectstatic
from dktasklib.package import Package, package
from dktasklib.watch import Watcher
from dktasklib.publish import publish
#: where tasks.py is located (root of package)
DIRNAME = Path(os.path.dirname(__file__))
# collectstatic
# --------------
# Specify which settings file should be used when running
# `python manage.py collectstatic` (must be on the path or package root
# directory).
DJANGO_SETTINGS_MODULE = ''
# .less
# ------
# there should be a mypkg/mypkg/less/mypkg.less file that imports any other
# needed sources
# .jsx (es6 source)
# ------------------
# list any .jsx files here. Only filename.jsx (don't include the path).
# The files should reside in mypkg/mypkg/js/ directory.
JSX_FILENAMES = []
# ============================================================================
# autodoc is in a separate process, so can't use settings.configure().
HAVE_SETTINGS = bool(DJANGO_SETTINGS_MODULE)
if not HAVE_SETTINGS and (DIRNAME / 'settings.py').exists():
# look for a dummy settings.py module in the root of the package.
DJANGO_SETTINGS_MODULE = 'settings'
if DJANGO_SETTINGS_MODULE:
os.environ['DJANGO_SETTINGS_MODULE'] = DJANGO_SETTINGS_MODULE
WARN_ABOUT_SETTINGS = not bool(DJANGO_SETTINGS_MODULE)
@task
def build_js(ctx, force=False):
"""Build all javascript files.
"""
for fname in JSX_FILENAMES:
jstools.babel(
ctx,
'{pkg.source_js}/' + fname,
'{pkg.django_static}/{pkg.name}/js/' + fname + '.js',
force=force
)
@task
def build(ctx, less=False, docs=False, js=False, force=False):
"""Build everything and collectstatic.
"""
specified = any([less, docs, js])
buildall = not specified
if buildall or less:
less_fname = ctx.pkg.source_less / ctx.pkg.name + '.less'
if less_fname.exists():
lessc.LessRule(
ctx,
src='{pkg.source_less}/{pkg.name}.less',
dst='{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css',
force=force
)
elif less:
warnings.warn(
"WARNING: build --less specified, but no file at: " + less_fname
)
if buildall or docs:
if WARN_ABOUT_SETTINGS:
warnings.warn(
"autodoc might need a dummy settings file in the root of "
"your package. Since it runs in a separate process you cannot"
"use settings.configure()"
)
doctools.build(ctx, force=force)
if buildall or js:
build_js(ctx, force)
if HAVE_SETTINGS and (force or changed(ctx.pkg.django_static)):
collectstatic(ctx, DJANGO_SETTINGS_MODULE, force=force)
@task
def watch(ctx):
"""Automatically run build whenever a relevant file changes.
"""
watcher = Watcher(ctx)
watcher.watch_directory(
path='{pkg.source_less}', ext='.less',
action=lambda e: build(ctx, less=True)
)
watcher.watch_directory(
path='{pkg.source_js}', ext='.jsx',
action=lambda e: build(ctx, js=True)
)
watcher.watch_directory(
path='{pkg.docs}', ext='.rst',
action=lambda e: build(ctx, docs=True)
)
watcher.start()
# individual tasks that can be run from this project
ns = Collection(
build,
watch,
build_js,
lessc,
doctools,
version, upversion,
package,
collectstatic,
publish,
)
ns.configure({
'pkg': Package(),
'run': {
'echo': True
}
})
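# The Collection above aggregates ready-made dktasklib tasks and seeds their
# shared configuration. The bare Invoke pattern it builds on looks like this
# (a minimal, standalone tasks.py sketch using only the invoke package; the task
# body is illustrative):
from invoke import task as _invoke_task_demo, Collection as _InvokeCollection_demo
@_invoke_task_demo
def hello_demo(ctx, name="world"):
    """Print a greeting (run with: inv hello-demo --name you)."""
    ctx.run('echo "hello {}"'.format(name))
_ns_demo = _InvokeCollection_demo(hello_demo)
_ns_demo.configure({'run': {'echo': True}})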
| datakortet/dk | tasks.py | Python | lgpl-3.0 | 4,792 |
from typing import Optional
from UM.Logger import Logger
from cura.CuraApplication import CuraApplication
from cura.PrinterOutput.Models.MaterialOutputModel import MaterialOutputModel
from .BaseCloudModel import BaseCloudModel
## Class representing a cloud cluster printer configuration
# Spec: https://api-staging.ultimaker.com/connect/v1/spec
class CloudClusterPrinterConfigurationMaterial(BaseCloudModel):
## Creates a new material configuration model.
# \param brand: The brand of material in this print core, e.g. 'Ultimaker'.
# \param color: The color of material in this print core, e.g. 'Blue'.
# \param guid: he GUID of the material in this print core, e.g. '506c9f0d-e3aa-4bd4-b2d2-23e2425b1aa9'.
# \param material: The type of material in this print core, e.g. 'PLA'.
def __init__(self, brand: Optional[str] = None, color: Optional[str] = None, guid: Optional[str] = None,
material: Optional[str] = None, **kwargs) -> None:
self.guid = guid
self.brand = brand
self.color = color
self.material = material
super().__init__(**kwargs)
## Creates a material output model based on this cloud printer material.
def createOutputModel(self) -> MaterialOutputModel:
material_manager = CuraApplication.getInstance().getMaterialManager()
material_group_list = material_manager.getMaterialGroupListByGUID(self.guid) or []
# Sort the material groups by "is_read_only = True" first, and then the name alphabetically.
read_only_material_group_list = list(filter(lambda x: x.is_read_only, material_group_list))
non_read_only_material_group_list = list(filter(lambda x: not x.is_read_only, material_group_list))
material_group = None
if read_only_material_group_list:
read_only_material_group_list = sorted(read_only_material_group_list, key = lambda x: x.name)
material_group = read_only_material_group_list[0]
elif non_read_only_material_group_list:
non_read_only_material_group_list = sorted(non_read_only_material_group_list, key = lambda x: x.name)
material_group = non_read_only_material_group_list[0]
if material_group:
container = material_group.root_material_node.getContainer()
color = container.getMetaDataEntry("color_code")
brand = container.getMetaDataEntry("brand")
material_type = container.getMetaDataEntry("material")
name = container.getName()
else:
Logger.log("w", "Unable to find material with guid {guid}. Using data as provided by cluster"
.format(guid = self.guid))
color = self.color
brand = self.brand
material_type = self.material
name = "Empty" if self.material == "empty" else "Unknown"
return MaterialOutputModel(guid = self.guid, type = material_type, brand = brand, color = color, name = name)
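# createOutputModel() prefers read-only (bundled) material groups and only falls
# back to the alphabetically first custom group when no bundled one matches. The
# same selection rule on plain tuples, outside Cura's container machinery (the
# sample data is made up):
_groups_demo = [('Generic PLA', False), ('Ultimaker PLA', True), ('ABS clone', False)]
_read_only_demo = sorted((g for g in _groups_demo if g[1]), key=lambda g: g[0])
_custom_demo = sorted((g for g in _groups_demo if not g[1]), key=lambda g: g[0])
_chosen_demo = (_read_only_demo or _custom_demo or [None])[0]
assert _chosen_demo == ('Ultimaker PLA', True)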
| Patola/Cura | plugins/UM3NetworkPrinting/src/Cloud/Models/CloudClusterPrinterConfigurationMaterial.py | Python | lgpl-3.0 | 3,000 |
import weakref
import logging
logger = logging.getLogger(__name__)
import core.cons as cons
from core.api import api
from core.config import conf
from qt import signals
#Config parser
OPTION_IP_RENEW_ACTIVE = "ip_renew_active"
OPTION_RENEW_SCRIPT_ACTIVE = "renew_script_active"
class IPRenewerGUI:
""""""
def __init__(self, parent, ip_renewer):
""""""
self.ip_renewer = ip_renewer
self.weak_parent = weakref.ref(parent)
self.id_item_list = []
self.is_working = True
if self.can_change_ip():
self.id_item_list = [download_item.id for download_item in api.get_active_downloads().values() + api.get_queue_downloads().values()]
signals.on_stop_all.emit()
if conf.get_addon_option(OPTION_RENEW_SCRIPT_ACTIVE, default=False, is_bool=True):
self.ip_renewer.start_shell_script()
else:
self.ip_renewer.start_default_ip_renew()
self.status_msg = _("Changing IP...")
signals.status_bar_push_msg.emit(self.status_msg)
self.timer = self.parent.idle_timeout(1000, self.update)
else:
self.is_working = False
@property
def parent(self):
return self.weak_parent()
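# Storing weakref.ref(parent) and dereferencing it in the `parent` property lets
# the GUI parent be garbage-collected without this helper keeping it alive. The
# mechanism in isolation (the immediate collection after `del` assumes CPython's
# reference counting; the class is a stand-in for the real window):
import weakref as _weakref_demo
class _ParentDemo(object):
    pass
_parent_obj_demo = _ParentDemo()
_parent_ref_demo = _weakref_demo.ref(_parent_obj_demo)
assert _parent_ref_demo() is _parent_obj_demo
del _parent_obj_demo
assert _parent_ref_demo() is None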
def can_change_ip(self):
""""""
for download_item in api.get_active_downloads().itervalues():
if download_item.start_time:
return False
return True
def update(self):
""""""
if not self.ip_renewer.is_running():
signals.status_bar_pop_msg.emit(self.status_msg)
for id_item in self.id_item_list:
api.start_download(id_item)
try:
self.parent.downloads.rows_buffer[id_item][1] = self.parent.downloads.icons_dict[cons.STATUS_QUEUE]
except Exception as err:
logger.debug(err)
self.timer.stop()
self.is_working = False | nitely/ochDownloader | addons/ip_renewer/ip_renewer_gui.py | Python | lgpl-3.0 | 2,017 |
# (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
A script to convert the standard names information from the provided XML
file into a Python dictionary format.
Takes two arguments: the first is the XML file to process and the second
is the name of the file to write the Python dictionary file into.
By default, Iris will use the source XML file:
etc/cf-standard-name-table.xml
as obtained from:
http://cf-pcmdi.llnl.gov/documents/cf-standard-names
"""
from __future__ import (absolute_import, division, print_function)
import argparse
import pprint
import xml.etree.ElementTree as ET
STD_VALUES_FILE_TEMPLATE = '''
# (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
This file contains a dictionary of standard value names that are mapped
to another dictionary of other standard name attributes. Currently only
the `canonical_unit` exists in these attribute dictionaries.
This file is automatically generated. Do not edit this file by hand.
The file will be generated during a standard build/installation:
python setup.py build
python setup.py install
Also, the file can be re-generated in the source distribution via:
python setup.py std_names
Or for more control (e.g. to use an alternative XML file) via:
python tools/generate_std_names.py XML_FILE MODULE_FILE
"""
from __future__ import (absolute_import, division, print_function)
STD_NAMES = '''.lstrip()
def process_name_table(tree, element_name, *child_elements):
"""
Yields a series of dictionaries with the key being the id of the entry element and the value containing
another dictionary mapping other attributes of the standard name to their values, e.g. units, description, grib value etc.
"""
for elem in tree.iterfind(element_name):
sub_section = {}
for child_elem in child_elements:
found_elem = elem.find(child_elem)
sub_section[child_elem] = found_elem.text if found_elem is not None else None
yield {elem.get("id") : sub_section}
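# process_name_table() walks selected child elements of each <entry>/<alias>
# node and keys their text by the node's id attribute. The same ElementTree
# pattern on an inline fragment of the CF table format (trimmed to the bits
# used here):
import xml.etree.ElementTree as _ET_demo
_xml_demo = '<table><entry id="air_temperature"><canonical_units>K</canonical_units></entry></table>'
_root_demo = _ET_demo.fromstring(_xml_demo)
_units_demo = {e.get('id'): e.findtext('canonical_units') for e in _root_demo.iterfind('entry')}
assert _units_demo == {'air_temperature': 'K'}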
def to_dict(infile, outfile):
values = {}
aliases = {}
tree = ET.parse(infile)
for section in process_name_table(tree, 'entry', 'canonical_units'):
values.update(section)
for section in process_name_table(tree, 'alias', 'entry_id'):
aliases.update(section)
for key, valued in aliases.iteritems():
values.update({
key : {'canonical_units' : values.get(valued['entry_id']).get('canonical_units')}
})
outfile.write(STD_VALUES_FILE_TEMPLATE + pprint.pformat(values))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Create Python code from CF standard name XML.')
parser.add_argument('input', type=argparse.FileType(),
metavar='INPUT',
help='Path to CF standard name XML')
parser.add_argument('output', type=argparse.FileType('w'),
metavar='OUTPUT',
help='Path to resulting Python code')
args = parser.parse_args()
to_dict(args.input, args.output)
| Jozhogg/iris | tools/generate_std_names.py | Python | lgpl-3.0 | 4,434 |
#!/usr/bin/env python3
# coding: utf-8
"""Launcher of the Mikado pick step."""
import argparse
import re
import sys
import os
from typing import Union, Dict
from ._utils import check_log_settings_and_create_logger, _set_pick_mode
import marshmallow
from ..configuration import DaijinConfiguration, MikadoConfiguration
from ..exceptions import InvalidConfiguration
from ..utilities.log_utils import create_default_logger, create_null_logger
from ..utilities import to_region, percentage
from ..utilities import IntervalTree, Interval
from ..configuration.configurator import load_and_validate_config
from ..picking import Picker
def _parse_regions(regions_string: Union[None,str]) -> Union[None, Dict[str, IntervalTree]]:
if regions_string is None:
return None
regions = dict()
if os.path.exists(regions_string):
with open(regions_string) as f_regions:
for counter, line in enumerate(f_regions, start=1):
try:
chrom, start, end = to_region(line)
except ValueError:
raise ValueError(f"Invalid region line, no. {counter}: {line}")
if chrom not in regions:
regions[chrom] = IntervalTree()
regions[chrom].add(Interval(start, end))
else:
chrom, start, end = to_region(regions_string)
regions[chrom] = IntervalTree.from_intervals([Interval(start, end)])
return regions
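# _parse_regions() accepts either a single region string or a file with one
# region per line and indexes the intervals per chromosome. A simplified
# stand-in using plain tuples instead of Mikado's IntervalTree; the
# "chrom:start-end" form parsed below is an assumption for the sketch, since the
# exact syntax accepted by to_region is defined elsewhere in Mikado:
import re as _re_region_demo
def _parse_region_string_demo(text):
    match = _re_region_demo.match(r'^(\S+):(\d+)-(\d+)$', text.strip())
    if match is None:
        raise ValueError('Invalid region line: {}'.format(text))
    chrom, start, end = match.group(1), int(match.group(2)), int(match.group(3))
    return chrom, min(start, end), max(start, end)
assert _parse_region_string_demo('Chr5:26575000-26625000') == ('Chr5', 26575000, 26625000)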
def _set_pick_output_options(conf: Union[DaijinConfiguration, MikadoConfiguration], args,
logger=create_null_logger()) -> Union[DaijinConfiguration, MikadoConfiguration]:
conf.pick.output_format.source = args.source if args.source is not None else conf.pick.output_format.source
conf.pick.output_format.id_prefix = args.prefix if args.prefix is not None else conf.pick.output_format.id_prefix
conf.pick.output_format.report_all_external_metrics = True if args.report_all_external_metrics is True else \
conf.pick.output_format.report_all_external_metrics
conf.pick.output_format.report_all_orfs = True if args.report_all_orfs is True else \
conf.pick.output_format.report_all_orfs
conf.pick.files.log = args.log if args.log else conf.pick.files.log
pat = re.compile(r"\.(gff3|gff)")
if args.loci_out:
conf.pick.files.loci_out = args.loci_out if pat.search(args.loci_out) else "{0}.gff3".format(args.loci_out)
if args.monoloci_out:
conf.pick.files.monoloci_out = args.monoloci_out if pat.search(args.monoloci_out) else "{0}.gff3".format(
args.monoloci_out)
if args.subloci_out:
conf.pick.files.subloci_out = args.subloci_out if pat.search(args.subloci_out) else "{0}.gff3".format(
args.subloci_out)
return conf
def _set_pick_run_options(conf: Union[DaijinConfiguration, MikadoConfiguration], args,
logger=create_null_logger()) -> Union[DaijinConfiguration, MikadoConfiguration]:
conf.pick.run_options.single_thread = args.single
conf.pick.run_options.exclude_cds = True if args.no_cds is True else conf.pick.run_options.exclude_cds
conf.pick.run_options.intron_range = tuple(sorted(args.intron_range)) if args.intron_range is not None \
else conf.pick.run_options.intron_range
    conf.pick.run_options.shm = True if args.shm is True else conf.pick.run_options.shm
if args.only_reference_update is True:
conf.pick.run_options.only_reference_update = True
conf.pick.run_options.reference_update = True
conf.pick.run_options.reference_update = True if args.reference_update is True else \
conf.pick.run_options.reference_update
conf.pick.run_options.check_references = True if args.check_references is True else \
conf.pick.run_options.check_references
return conf
def _set_pick_clustering_options(conf: Union[DaijinConfiguration, MikadoConfiguration],
args) -> Union[DaijinConfiguration, MikadoConfiguration]:
conf.pick.clustering.purge = False if args.no_purge is True else conf.pick.clustering.purge
conf.pick.clustering.flank = args.flank if args.flank is not None else conf.pick.clustering.flank
conf.pick.clustering.min_cds_overlap = args.min_clustering_cds_overlap if \
args.min_clustering_cds_overlap else conf.pick.clustering.min_cds_overlap
conf.pick.clustering.cds_only = True if args.cds_only else conf.pick.clustering.cds_only
if args.min_clustering_cdna_overlap is not None:
conf.pick.clustering.min_cdna_overlap = args.min_clustering_cdna_overlap
if args.min_clustering_cds_overlap is None:
conf.pick.clustering.min_cds_overlap = args.min_clustering_cdna_overlap
return conf
def _set_pick_as_options(conf: Union[DaijinConfiguration, MikadoConfiguration],
args) -> Union[DaijinConfiguration, MikadoConfiguration]:
    conf.pick.alternative_splicing.pad = args.pad if args.pad is not None else \
        conf.pick.alternative_splicing.pad
    conf.pick.alternative_splicing.ts_max_splices = args.pad_max_splices if args.pad_max_splices is not None \
        else conf.pick.alternative_splicing.ts_max_splices
    conf.pick.alternative_splicing.ts_distance = args.pad_max_distance if args.pad_max_distance is not None else \
        conf.pick.alternative_splicing.ts_distance
conf.pick.alternative_splicing.cds_only = True if args.as_cds_only is True else \
conf.pick.alternative_splicing.cds_only
conf.pick.alternative_splicing.keep_cds_disrupted_by_ri = True if args.keep_disrupted_cds is True \
else conf.pick.alternative_splicing.keep_cds_disrupted_by_ri
conf.pick.alternative_splicing.keep_retained_introns = False if args.exclude_retained_introns is True else \
conf.pick.alternative_splicing.keep_retained_introns
return conf
def _set_conf_values_from_args(conf: Union[DaijinConfiguration, MikadoConfiguration], args,
logger=create_null_logger()) -> Union[DaijinConfiguration, MikadoConfiguration]:
conf.multiprocessing_method = args.start_method if args.start_method else conf.multiprocessing_method
conf.threads = args.procs if args.procs is not None else conf.threads
if args.random_seed is True:
conf.seed = None
elif args.seed is not None:
conf.seed = args.seed
else:
pass
conf.pick.scoring_file = args.scoring_file if args.scoring_file is not None else conf.pick.scoring_file
conf.prepare.max_intron_length = args.max_intron_length if args.max_intron_length is not None else \
conf.prepare.max_intron_length
conf.serialise.codon_table = str(args.codon_table) if args.codon_table not in (False, None, True) \
else conf.serialise.codon_table
conf = _set_pick_output_options(conf, args)
conf = _set_pick_mode(conf, args.mode)
conf = _set_pick_run_options(conf, args)
conf = _set_pick_clustering_options(conf, args)
conf = _set_pick_as_options(conf, args)
try:
conf = load_and_validate_config(conf, logger=logger)
except marshmallow.exceptions.MarshmallowError as exc:
logger.critical("Invalid options specified for the configuration: {}".format(exc))
raise exc
return conf
def _check_db(conf: Union[MikadoConfiguration, DaijinConfiguration], args,
logger=create_null_logger()) -> Union[MikadoConfiguration, DaijinConfiguration]:
logger.debug("Checking the database")
if args.sqlite_db is not None:
if not os.path.exists(args.sqlite_db):
exc = InvalidConfiguration(f"Mikado database {args.sqlite_db} not found. Exiting.")
logger.critical(exc)
raise exc
logger.debug(f"Setting the database from the CLI to {args.sqlite_db}")
conf.db_settings.db = args.sqlite_db
conf.db_settings.dbtype = "sqlite"
if conf.db_settings.dbtype == "sqlite":
raw = conf.db_settings.db
db_basename = os.path.basename(conf.db_settings.db)
        __compound = os.path.join(conf.pick.files.output_dir, conf.db_settings.db)
        __base = os.path.join(conf.pick.files.output_dir, db_basename)
found = False
for option in raw, __compound, __base:
if os.path.exists(option):
conf.db_settings.db = option
found = True
break
if found is False:
exc = InvalidConfiguration(f"Mikado database {conf.db_settings.db} not found. Exiting.")
logger.critical(exc)
raise exc
logger.debug(f"Found database: {conf.db_settings.dbtype}:///{conf.db_settings.db}")
return conf
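# Lookup sketch (hypothetical values): with db_settings.db = "subdir/mikado.db" and
# pick.files.output_dir = "run1", the candidates tried in order are
# "subdir/mikado.db", "run1/subdir/mikado.db" and "run1/mikado.db"; the first path
# that exists is kept, otherwise an InvalidConfiguration error is raised.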
def _check_pick_input(conf: Union[MikadoConfiguration, DaijinConfiguration], args,
logger=create_null_logger()) -> Union[MikadoConfiguration, DaijinConfiguration]:
if args.gff:
conf.pick.files.input = args.gff
if not os.path.exists(args.gff):
raise InvalidConfiguration("The input file {} does not exist. Please double check!".format(args.gff))
prep_gtf = os.path.join(conf.prepare.files.output_dir, conf.prepare.files.out)
if not os.path.exists(conf.pick.files.input):
if os.path.exists(prep_gtf):
conf.pick.files.input = prep_gtf
elif os.path.exists(conf.prepare.files.out):
conf.pick.files.input = conf.prepare.files.out
else:
exc = InvalidConfiguration("I tried to infer the input file from the prepare option, but failed. Please "
"point me to the correct file through the command line or by correcting the "
"configuration file.")
logger.critical(exc)
raise exc
if args.genome:
if not os.path.exists(args.genome):
raise InvalidConfiguration(f"The requested genome FASTA file does not seem to exist: {args.genome}")
conf.reference.genome = args.genome
if conf.pick.alternative_splicing.pad and not os.path.exists(conf.reference.genome):
exc = InvalidConfiguration("Transcript padding cannot function unless the genome file is specified. \
Please either provide a valid genome file or disable the padding.")
logger.critical(exc)
raise exc
conf = _check_db(conf, args, logger)
return conf
def check_run_options(mikado_configuration: Union[MikadoConfiguration, DaijinConfiguration],
args: argparse.Namespace, logger=create_null_logger()):
"""
Quick method to check the consistency of run option settings
from the namespace.
:param args: a Namespace
:param logger: a logger instance.
:return: args
"""
mikado_configuration = _set_conf_values_from_args(mikado_configuration, args, logger=logger)
mikado_configuration = _check_pick_input(mikado_configuration, args, logger)
mikado_configuration = load_and_validate_config(mikado_configuration, logger=logger)
return mikado_configuration
def pick(args):
"""
This function launches the pick step, using the data derived from the Namespace.
:param args: argparse Namespace with the configuration for the run.
"""
logger = create_default_logger("pick", level="WARNING")
mikado_configuration = load_and_validate_config(args.configuration, logger=logger)
# Create the output directory. Necessary to do it here to avoid the logger being put in the wrong place.
if args.output_dir is not None:
mikado_configuration.pick.files.output_dir = os.path.abspath(args.output_dir)
else:
mikado_configuration.pick.files.output_dir = os.path.abspath(mikado_configuration.pick.files.output_dir)
try:
os.makedirs(mikado_configuration.pick.files.output_dir, exist_ok=True)
except OSError:
exc = OSError("I cannot create the output directory {}. Aborting.".format(
mikado_configuration.pick.files.output_dir))
logger.critical(exc)
raise exc
mikado_configuration, logger = check_log_settings_and_create_logger(mikado_configuration, args.log, args.log_level,
section="pick")
mikado_configuration = check_run_options(mikado_configuration, args, logger=logger)
regions = _parse_regions(args.regions)
creator = Picker(mikado_configuration, commandline=" ".join(sys.argv), regions=regions)
creator()
def pick_parser():
"""
Parser for the picking step.
"""
parser = argparse.ArgumentParser(description="Launcher of the Mikado pipeline.")
parser.add_argument("--fasta", "--genome", default=None, dest="genome",
help="Genome FASTA file. Required for transcript padding.")
parser.add_argument("--start-method", dest="start_method",
choices=["fork", "spawn", "forkserver"],
default=None, help="Multiprocessing start method.")
parser.add_argument("--shm", default=False, action="store_true",
help="Flag. If switched, Mikado pick will copy the database to RAM (ie SHM) for faster access \
during the run.")
parser.add_argument("-p", "--procs", type=int, default=None,
help="""Number of processors to use. \
Default: look in the configuration file (1 if undefined)""")
parser.add_argument("--configuration", "--json-conf", dest="configuration", required=True,
help="Configuration file for Mikado.")
parser.add_argument("--scoring-file", dest="scoring_file",
type=str, default=None,
required=False,
help="Optional scoring file for the run. It will override the value set in the configuration.")
parser.add_argument("-i", "--intron-range",
dest="intron_range", type=int, nargs=2,
default=None,
help="""Range into which intron lengths should fall, as a couple of integers. \
Transcripts with intron lengths outside of this range will be penalised. Default: (60, 900)""")
padding = parser.add_mutually_exclusive_group()
padding.add_argument("--no-pad", dest="pad", default=None, action="store_false", help="Disable transcript padding.")
padding.add_argument("--pad", default=None,
action="store_true",
help="Whether to pad transcripts in loci.")
    parser.add_argument("--codon-table", dest="codon_table", default=None,
                        help="""Codon table to use. Default: 0 (ie Standard, NCBI #1, but only ATG is considered \
a valid start codon).""")
parser.add_argument("--pad-max-splices", default=None, dest="pad_max_splices",
type=int, help="Maximum splice sites that can be crossed during transcript padding.")
parser.add_argument("--pad-max-distance", default=None, dest="pad_max_distance",
type=int, help="Maximum amount of bps that transcripts can be padded with (per side).")
parser.add_argument("-r", "--regions",
help="""Either a single region on the CLI or a file listing a series of target regions.
Mikado pick will only consider regions included in this string/file.
Regions should be provided in a WebApollo-like format: <chrom>:<start>..<end>""")
output = parser.add_argument_group("Options related to the output files.")
output.add_argument("-od", "--output-dir", dest="output_dir",
type=str, default=None,
help="Output directory. Default: current working directory")
output.add_argument("--subloci-out", type=str, default=None, dest="subloci_out")
output.add_argument("--monoloci-out", type=str, default=None, dest="monoloci_out")
output.add_argument("--loci-out", type=str, default=None, dest="loci_out",
help="""This output file is mandatory.
If it is not specified in the configuration file,
it must be provided here.""")
output.add_argument("--prefix", type=str, default=None,
help="Prefix for the genes. Default: Mikado")
output.add_argument('--source', type=str, default=None,
help='Source field to use for the output files.')
output.add_argument("--report-all-external-metrics", default=None,
action="store_true",
help="Boolean switch. If activated, Mikado will report all available external metrics, not just \
those requested for in the scoring configuration. This might affect speed in Minos analyses.")
parser.add_argument("--no_cds", action="store_true", default=False,
                        help="""Flag. If set, no CDS information will be printed out in the GFF output files.""")
parser.add_argument("--flank", default=None, type=int,
help="""Flanking distance (in bps) to group non-overlapping transcripts into a single \
superlocus. Default: determined by the configuration file.""")
parser.add_argument("--max-intron-length", default=None, type=int,
help="""Maximum intron length for a transcript. Default: inferred from the configuration \
file (default value there is 1,000,000 bps).""")
parser.add_argument('--no-purge', action='store_true', default=False,
dest="no_purge",
help='''Flag. If set, the pipeline will NOT suppress any loci \
whose transcripts do not pass the requirements set in the JSON file.''')
parser.add_argument("--cds-only", dest="cds_only",
default=None, action="store_true",
                        help="""Flag. If set, Mikado will only look for overlap in the coding features \
when clustering transcripts (unless one transcript is non-coding, in which case the whole transcript will \
be considered). Please note that Mikado will only consider the **best** ORF for this. \
Default: False, Mikado will consider transcripts in their entirety.""")
parser.add_argument("--as-cds-only", dest="as_cds_only", default=None, action="store_true",
help="""Flag. If set, Mikado will only consider the CDS to determine whether a transcript
is a valid alternative splicing event in a locus.""")
parser.add_argument("--reference-update", dest="reference_update", default=None,
action="store_true",
help="""Flag. If switched on, Mikado will prioritise transcripts marked as reference and will \
consider any other transcript within loci only in reference to these reference transcripts. Novel loci will still be reported.""")
parser.add_argument("--report-all-orfs", default=False, action="store_true",
help="Boolean switch. If set to true, all ORFs will be reported, not just the primary.")
parser.add_argument("--only-reference-update", dest="only_reference_update", default=None,
action="store_true",
help="""Flag. If switched on, Mikado will only keep loci where at least one of the transcripts \
is marked as "reference". CAUTION: if no transcript has been marked as reference, the output will be completely empty!""")
parser.add_argument("-eri", "--exclude-retained-introns", default=None, action="store_true",
help="""Exclude all retained intron alternative splicing events from the final output. \
Default: False. Retained intron events that do not disrupt the CDS are kept by Mikado in the final output.""")
parser.add_argument("-kdc", "--keep-disrupted-cds", default=None, action="store_true",
help="""Keep in the final output transcripts whose CDS is most probably disrupted by a \
retained intron event. Default: False. Mikado will try to detect these instances and exclude them from the \
final output.""")
parser.add_argument("-mco", "--min-clustering-cdna-overlap", default=None, type=percentage,
help="Minimum cDNA overlap between two transcripts for them to be considered part of the same \
locus during the late picking stages. \
NOTE: if --min-cds-overlap is not specified, it will be set to this value! \
Default: 20%%.")
parser.add_argument("-mcso", "--min-clustering-cds-overlap", default=None, type=percentage,
help="Minimum CDS overlap between two transcripts for them to be considered part of the same \
locus during the late picking stages. \
NOTE: if not specified, and --min-cdna-overlap is specified on the command line, min-cds-overlap will be set to this value! \
Default: 20%%.")
parser.add_argument("--check-references", dest="check_references", default=None,
action="store_true",
help="""Flag. If switched on, Mikado will also check reference models against the general
transcript requirements, and will also consider them as potential fragments. This is useful in the context of e.g.
updating an *ab-initio* results with data from RNASeq, protein alignments, etc.
""")
parser.add_argument("-db", "--sqlite-db", dest="sqlite_db",
default=None, type=str,
help="Location of an SQLite database to overwrite what is specified \
in the configuration file.")
parser.add_argument("--single", action="store_true", default=False,
help="""Flag. If set, Creator will be launched with a single process, without involving the
multithreading apparatus. Useful for debugging purposes only.""")
log_options = parser.add_argument_group("Log options")
log_options.add_argument("-l", "--log", default=None,
help="""File to write the log to.
Default: decided by the configuration file.""")
verbosity = log_options.add_mutually_exclusive_group()
verbosity.add_argument("--verbose", default=None, dest="log_level", action="store_const", const="DEBUG")
verbosity.add_argument("--quiet", default=None, dest="log_level", action="store_const", const="WARNING")
verbosity.add_argument("-lv", "--log-level", dest="log_level",
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], default=None,
help="Logging level. Default: retrieved by the configuration file.")
# parser.formatter_class = argparse.RawTextHelpFormatter
parser.add_argument("--mode", default=None,
choices=["nosplit", "stringent", "lenient", "permissive", "split"],
help="""Mode in which Mikado will treat transcripts with multiple ORFs.
- nosplit: keep the transcripts whole.
- stringent: split multi-orf transcripts if two consecutive ORFs have both BLAST hits
and none of those hits is against the same target.
- lenient: split multi-orf transcripts as in stringent, and additionally, also when
either of the ORFs lacks a BLAST hit (but not both).
- permissive: like lenient, but also split when both ORFs lack BLAST hits
- split: split multi-orf transcripts regardless of what BLAST data is available.""")
seed_group = parser.add_mutually_exclusive_group()
seed_group.add_argument("--seed", type=int, default=None, help="Random seed number. Default: 0.")
seed_group.add_argument("--random-seed", action="store_true", default=False,
help="Generate a new random seed number (instead of the default of 0)")
parser.add_argument("gff", nargs="?", default=None)
parser.set_defaults(func=pick)
return parser
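# Example invocation of this sub-command (file names are hypothetical):
#   mikado pick --configuration configuration.yaml --loci-out mikado.loci.gff3 \
#       -db mikado.db mikado_prepared.gtf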
| lucventurini/mikado | Mikado/subprograms/pick.py | Python | lgpl-3.0 | 23,916 |
#! /usr/bin/env python3
# suff
#
# show different suffixes amongst arguments
import sys
def main():
files = sys.argv[1:]
suffixes = {}
for filename in files:
suff = getsuffix(filename)
suffixes.setdefault(suff, []).append(filename)
for suff, filenames in sorted(suffixes.items()):
print(repr(suff), len(filenames))
def getsuffix(filename):
name, sep, suff = filename.rpartition('.')
return sep + suff if sep else ''
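# For example: getsuffix('archive.tar.gz') returns '.gz', while getsuffix('README')
# returns '' because rpartition finds no '.' and leaves the separator empty.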
if __name__ == '__main__':
main()
| Orav/kbengine | kbe/src/lib/python/Tools/scripts/suff.py | Python | lgpl-3.0 | 536 |
#!/usr/bin/env python3
#----------------------------------------------------------------------------
#
# Copyright (C) 2015 José Flávio de Souza Dias Júnior
#
# This file is part of CXXII - http://www.joseflavio.com/cxxii/
#
# CXXII is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CXXII is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with CXXII. If not, see http://www.gnu.org/licenses/.
#
#----------------------------------------------------------------------------
#
# Direitos Autorais Reservados (C) 2015 José Flávio de Souza Dias Júnior
#
# Este arquivo é parte de CXXII - http://www.joseflavio.com/cxxii/
#
# CXXII é software livre: você pode redistribuí-lo e/ou modificá-lo
# sob os termos da Licença Pública Menos Geral GNU conforme publicada pela
# Free Software Foundation, tanto a versão 3 da Licença, como
# (a seu critério) qualquer versão posterior.
#
# CXXII é distribuído na expectativa de que seja útil,
# porém, SEM NENHUMA GARANTIA; nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Consulte a
# Licença Pública Menos Geral do GNU para mais detalhes.
#
# Você deve ter recebido uma cópia da Licença Pública Menos Geral do GNU
# junto com CXXII. Se não, veja http://www.gnu.org/licenses/.
#
#----------------------------------------------------------------------------
# "Não vos conformeis com este mundo,
# mas transformai-vos pela renovação do vosso espírito,
# para que possais discernir qual é a vontade de Deus,
# o que é bom, o que lhe agrada e o que é perfeito."
# (Bíblia Sagrada, Romanos 12:2)
# "Do not conform yourselves to this age
# but be transformed by the renewal of your mind,
# that you may discern what is the will of God,
# what is good and pleasing and perfect."
# (Holy Bible, Romans 12:2)
import sys
if sys.version_info[0] < 3:
print('CXXII exige Python 3 ou mais recente.')
sys.exit(1)
import os
import time
import datetime
import unicodedata
import urllib.request
import tempfile
import zipfile
import re
from xml.etree.ElementTree import ElementTree as CXXII_XML_Arvore
#----------------------------------------------------------------------------
class CXXII_XML_Arquivo:
    """Class representing an XML file."""
def __init__(self, endereco):
self.endereco = endereco
self.nome = NomeDoArquivo(endereco)
self.arvore = CXXII_XML_Arvore()
self.arvore.parse(endereco)
def raiz(self):
return self.arvore.getroot()
def CXXII_Baixar( url, destino=None, nome=None, forcar=False ):
    """Download a file."""
if url[-1] == '/' : url = url[0:-1]
if destino is None :
global CXXII_Diretorio
destino = CXXII_Diretorio
if destino[-1] == os.sep : destino = destino[0:-1]
if nome is None : nome = url.replace('/', '_').replace(':', '_')
endereco = destino + os.sep + nome
existe = os.path.exists(endereco)
baixar = forcar or not existe
if not baixar:
global CXXII_Gerador_TempoMaximo
baixar = ( time.time() - os.path.getmtime(endereco) ) > CXXII_Gerador_TempoMaximo
if baixar:
try:
urllib.request.urlretrieve(url, endereco)
except:
raise Exception('Não foi possível baixar o gerador.')
return endereco
def CXXII_XML_Adicionar( endereco ):
global CXXII_XML_Arquivos
CXXII_XML_Arquivos.append( CXXII_XML_Arquivo(endereco) )
def CXXII_Separadores( endereco ):
    """Replace "/" separators with the operating system's real path separator."""
return endereco.replace('/', os.sep)
def CXXII_Python_Formato( arquivo ):
    """Return the value of the "# coding=" header.
    arquivo -- File object or file path. "/" may be used as the separator.
    """
if type(arquivo) is str: arquivo = open( CXXII_Separadores(arquivo), 'r', encoding='iso-8859-1' )
formato = 'utf-8'
arquivo.seek(0)
for i in range(2):
linha = arquivo.readline()
if linha.startswith('# coding='):
formato = linha[9:-1]
break
arquivo.close()
return formato
def CXXII_Abrir_Python( endereco ):
    """Open a ".py" file honouring the encoding declared in its "# coding=" header.
    endereco -- File path. "/" may be used as the separator.
    """
endereco = CXXII_Separadores(endereco)
return open( endereco, 'r', encoding=CXXII_Python_Formato(endereco) )
def CXXII_Atual( endereco, modo='w', formato='utf-8' ):
    """Set the file currently being generated.
    endereco -- Path of the desired file, relative to CXXII_Destino. "/" may be used as the separator.
    """
global CXXII_Saida
global CXXII_Destino
endereco = CXXII_Separadores(endereco)
if endereco[0] != os.sep: endereco = os.sep + endereco
if CXXII_Saida != None and CXXII_Saida != sys.stdout: CXXII_Saida.close()
arquivo = CXXII_Texto(CXXII_Destino + endereco)
diretorio = CXXII_Texto(os.path.dirname(arquivo))
if not os.path.exists(diretorio): os.makedirs(diretorio)
CXXII_Saida = open( arquivo, modo, encoding=formato )
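# Usage sketch (hypothetical relative path): CXXII_Atual('docs/index.html') creates any
# missing sub-directories under CXXII_Destino and makes that file the target of the
# following CXXII_Escrever() calls.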
def CXXII_Escrever( texto ):
    """Write to the file currently being generated. See CXXII_Atual()."""
global CXXII_Saida
if not CXXII_Saida is None:
CXXII_Saida.write(texto)
def CXXII_ContarIdentacao( linha ):
comprimento = len(linha)
if comprimento == 0: return 0
tamanho = comprimento - len(linha.lstrip())
if linha[0] == ' ': tamanho /= 4
return tamanho
def CXXII_Identar( linha, total=1 ):
espaco = '\t' if len(linha) > 0 and linha[0] == '\t' else ' '
while total > 0:
linha = espaco + linha
total -= 1
return linha
def CXXII_EscreverArquivo( endereco, inicio=1, fim=None, quebraFinal=True, formato='utf-8', dicGlobal=None, dicLocal=None ):
    """Write the contents of a template file to the file currently being generated (CXXII_Atual()).
    A template file is any content that contains embedded CXXII instructions.
    If the path is relative, the file is first looked for in CXXII_Gerador_Diretorio.
    endereco -- Path of the template file. "/" may be used as the separator.
    inicio -- First line of the desired line range. Default: 1
    fim -- Last line of the desired line range. Default: None (last line)
    quebraFinal -- Keep the line break on the last line?
    dicGlobal -- See globals()
    dicLocal -- See locals()
    """
global CXXII_Saida
if CXXII_Saida is None: return
if dicGlobal is None: dicGlobal = globals()
if dicLocal is None: dicLocal = locals()
endereco = CXXII_Separadores(endereco)
if endereco[0] != os.sep and os.path.exists(CXXII_Gerador_Diretorio + os.sep + endereco):
endereco = CXXII_Gerador_Diretorio + os.sep + endereco
codigo = []
modelo = open( endereco, 'r', encoding=formato )
linhas = list(modelo)
modelo.close()
total = len(linhas)
if inicio != 1 or fim != None:
inicio = inicio - 1 if inicio != None else 0
fim = fim if fim != None else total
linhas = linhas[inicio:fim]
if not quebraFinal and linhas[-1][-1] == '\n': linhas[-1] = linhas[-1][0:-1]
total = len(linhas)
identacao = 0
i = 0
while i < total:
linha = linhas[i]
if linha == '@@@\n':
i += 1
if i < total and identacao > 0 and linhas[i] == '@@@\n':
identacao -= 1
else:
while i < total and linhas[i] != '@@@\n':
linha = linhas[i]
identacao = CXXII_ContarIdentacao(linha)
codigo.append(linha)
linha = linha.strip()
if len(linha) > 0 and linha[-1] == ':':
if linha.startswith('for ') or linha.startswith('while '):
identacao += 1
i += 1
else:
codigo.append(CXXII_Identar('"""~\n', identacao))
finalComQuebra = False
while i < total and linhas[i] != '@@@\n':
linha = linhas[i]
finalComQuebra = linha.endswith('\n')
if not finalComQuebra: linha += '\n'
codigo.append(linha)
i += 1
if finalComQuebra: codigo.append('\n')
codigo.append(CXXII_Identar('"""\n', identacao))
i -= 1
i += 1
CXXII_Executar( CXXII_CompilarPython(codigo), dicGlobal, dicLocal )
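# Illustrative template ("arquivo-modelo") accepted by CXXII_EscreverArquivo, using the
# globals defined in this module; the literal text and loop are invented for the example.
# Plain lines are written to the current output file, {{{...}}} embeds a Python
# expression, and lines placed between standalone "@@@" markers are executed as Python:
#
#   @@@
#   for xml in CXXII_XML_Arquivos:
#   @@@
#   Arquivo: {{{xml.nome}}}
#   @@@
#   @@@
#
# The final "@@@"/"@@@" pair closes the indentation level opened by the "for" line.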
def CXXII_Texto( texto, decodificar=False ):
if decodificar and type(texto) is bytes: texto = texto.decode(sys.getfilesystemencoding())
return unicodedata.normalize('NFC', texto)
def CXXII_EscapeParaTexto( texto ):
return texto.replace('\n','\\n').replace('\r','\\r').replace('\t','\\t').replace('\'','\\\'')
def CXXII_TextoParaEscape( texto ):
return texto.replace('\\n','\n').replace('\\r','\r').replace('\\t','\t').replace('\\\'','\'')
def NomeDoArquivo( endereco, extensao=True ):
if endereco[-1] == os.sep: endereco = endereco[0:-1]
nome = endereco[endereco.rfind(os.sep)+1:]
    if not extensao and '.' in nome:
        # strip the extension, keeping everything before the last '.'
        nome = nome[0:nome.rfind('.')]
return nome
def CXXII_Compilar( endereco ):
    """Compile a file written in the Python language.
    If the path is relative, the file is first looked for in CXXII_Gerador_Diretorio.
    endereco -- Path of the ".py" file. "/" may be used as the separator.
    """
endereco = CXXII_Separadores(endereco)
if endereco[0] != os.sep and os.path.exists(CXXII_Gerador_Diretorio + os.sep + endereco):
endereco = CXXII_Gerador_Diretorio + os.sep + endereco
py_arquivo = CXXII_Abrir_Python(endereco)
py = list(py_arquivo)
py_arquivo.close()
return CXXII_CompilarPython(py)
def CXXII_CompilarPython( codigoFonte ):
    """Compile source code written in the Python language."""
py = list(codigoFonte) if type(codigoFonte) != list else codigoFonte
if py[0].startswith('# coding='):
py = py[1:]
elif py[1].startswith('# coding='):
py = py[2:]
py[-1] += '\n'
i = 0
total = len(py)
embutido = re.compile('({{{[^{}]*}}})')
while i < total:
linha = py[i]
passo = 1
if linha.endswith('"""~\n'):
desconsiderar = False
tokenstr = None
cpre = None
for c in linha:
if tokenstr != None:
if c == tokenstr and cpre != '\\': tokenstr = None
elif c == '#':
desconsiderar = True
break
elif c == '\'' or c == '\"':
tokenstr = c
cpre = c
if desconsiderar:
i += passo
continue
linha = linha[:-5] + 'CXXII_Escrever(\''
a = i
b = a + 1
while b < total and not py[b].lstrip().startswith('"""'): b += 1
if b >= total: raise Exception('Bloco de escrita não finalizado: linha ' + str(i))
py[b] = py[b][py[b].index('"""')+3:]
passo = b - a
if (b-a) > 1:
primeiro = True
a += 1
while a < b:
linha += ( '\\n' if not primeiro else '' ) + CXXII_EscapeParaTexto( py[a][:-1] )
py[a] = ''
primeiro = False
a += 1
linhapos = 0
while True:
codigo = embutido.search(linha, linhapos)
if not codigo is None:
parte1 = \
linha[0:codigo.start(0)] +\
'\'+' +\
CXXII_TextoParaEscape(codigo.group(0)[3:-3]) +\
'+\''
parte2 = linha[codigo.end(0):]
linha = parte1 + parte2
linhapos = len(parte1)
else:
break
linha += '\');'
py[i] = linha
i += passo
return compile( ''.join(py), 'CXXII_Python', 'exec' )
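# Rewrite sketch (illustrative variable name): inside a compiled block, the lines
#     """~
#     Nome: {{{nome}}}
#     """
# become, roughly, CXXII_Escrever('Nome: '+nome+''); so literal text is emitted through
# CXXII_Escrever() and {{{...}}} fragments are concatenated in as Python expressions.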
def CXXII_Executar( python, dicGlobal=None, dicLocal=None ):
    """Execute Python code pre-compiled with CXXII_Compilar(), or a Python file.
    If the path is relative, the file is first looked for in CXXII_Gerador_Diretorio.
    python -- Pre-compiled code or file path. "/" may be used as the separator.
    dicGlobal -- See globals()
    dicLocal -- See locals()
    """
if dicGlobal is None: dicGlobal = globals()
if dicLocal is None: dicLocal = locals()
exec( CXXII_Compilar(python) if type(python) is str else python, dicGlobal, dicLocal )
#----------------------------------------------------------------------------
CXXII_Repositorio = 'http://www.joseflavio.com/cxxii/'
CXXII_Inicio = datetime.datetime.today()
CXXII_Gerador_Endereco = None
CXXII_Gerador_Diretorio = None
CXXII_Gerador_TempoMaximo = 6*60*60 #6h
CXXII_Gerador_Baixar = False
CXXII_Destino = None
CXXII_XML_Arquivos = []
CXXII_Extensao = 'xml'
CXXII_Saida = sys.stdout
CXXII_Diretorio = CXXII_Texto(os.path.expanduser('~')) + os.sep + 'CXXII'
CXXII_Geradores = CXXII_Diretorio + os.sep + 'Geradores'
if not os.path.exists(CXXII_Geradores): os.makedirs(CXXII_Geradores)
#----------------------------------------------------------------------------
try:
#----------------------------------------------------------------------------
argumentos = CXXII_Texto(' '.join(sys.argv), True)
argumentos = argumentos.replace(' -g', '###g')
argumentos = argumentos.replace(' -f', '###fSIM')
argumentos = argumentos.replace(' -t', '###tSIM')
argumentos = argumentos.replace(' -d', '###d')
argumentos = argumentos.replace(' -e', '###e')
argumentos = argumentos.replace(' -a', '###a')
argumentos = argumentos.split('###')
argumento_g = None
argumento_f = None
argumento_t = None
argumento_d = None
argumento_e = None
argumento_a = None
for argumento in argumentos[1:]:
valor = argumento[1:].strip()
if len(valor) == 0: continue
exec( 'argumento_' + argumento[0] + '=\'' + valor + '\'' )
if argumento_g is None or argumento_a is None:
print('\nCXXII 1.0-A1 : Gerador de arquivos a partir de XML\n')
print('cxxii -g GERADOR [-f] [-t] [-d DESTINO] [-e EXTENSAO] -a ARQUIVOS\n')
print('Argumentos:')
print(' -g URL ou endereço local do gerador a utilizar: .py ou .zip')
print(' Nome sem extensão = ' + CXXII_Repositorio + 'Nome.zip')
print(' -f Forçar download do gerador')
print(' -t Imprimir detalhes do erro que possa ocorrer')
print(' -d Destino dos arquivos gerados')
print(' -e Extensão padrão dos arquivos de entrada: xml')
print(' -a Arquivos XML de entrada ou diretórios que os contenham\n')
sys.exit(1)
#----------------------------------------------------------------------------
if argumento_e != None: CXXII_Extensao = argumento_e.lower()
argumento_a = argumento_a.replace('.' + CXXII_Extensao, '.' + CXXII_Extensao + '###')
argumento_a = argumento_a.split('###')
for xml in argumento_a:
xml = xml.strip()
if len(xml) == 0: continue
xml = CXXII_Texto(os.path.abspath(xml))
if os.path.isdir(xml):
for arquivo in os.listdir(xml):
arquivo = CXXII_Texto(arquivo)
if arquivo.lower().endswith('.' + CXXII_Extensao):
CXXII_XML_Adicionar(xml + os.sep + arquivo)
else:
CXXII_XML_Adicionar(xml)
if len(CXXII_XML_Arquivos) == 0:
sys.exit(0)
#----------------------------------------------------------------------------
try:
CXXII_Gerador_Baixar = not argumento_f is None
gerurl = argumento_g.startswith('http://')
if( gerurl and argumento_g[-1] == '/' ): argumento_g = argumento_g[0:-1]
gernome = argumento_g[argumento_g.rfind('/' if gerurl else os.sep)+1:]
gerpy = gernome.endswith('.py')
gerzip = gernome.endswith('.zip')
if gerurl:
argumento_g = CXXII_Baixar(url=argumento_g, destino=CXXII_Geradores, forcar=CXXII_Gerador_Baixar)
elif gerpy or gerzip:
argumento_g = CXXII_Texto(os.path.abspath(argumento_g))
else:
gerurl = True
gernome += '.zip'
gerzip = True
argumento_g = CXXII_Baixar(url=CXXII_Repositorio + gernome, destino=CXXII_Geradores, forcar=CXXII_Gerador_Baixar)
if gerzip:
destino = argumento_g[0:-4]
if not os.path.exists(destino): os.makedirs(destino)
CXXII_Gerador_Endereco = destino + os.sep + gernome[0:-4] + '.py'
descompactar = not os.path.exists(CXXII_Gerador_Endereco)
if not descompactar:
descompactar = os.path.getmtime(argumento_g) > os.path.getmtime(CXXII_Gerador_Endereco)
if descompactar:
zip = zipfile.ZipFile(argumento_g, 'r')
zip.extractall(destino)
del zip
else:
CXXII_Gerador_Endereco = argumento_g
CXXII_Gerador_Diretorio = CXXII_Texto(os.path.dirname(CXXII_Gerador_Endereco))
except:
raise Exception('Gerador inválido.')
#----------------------------------------------------------------------------
CXXII_Destino = argumento_d if not argumento_d is None else 'CXXII_' + CXXII_Inicio.strftime('%Y%m%d%H%M%S')
CXXII_Destino = CXXII_Texto(os.path.abspath(CXXII_Destino))
if not os.path.exists(CXXII_Destino): os.makedirs(CXXII_Destino)
#----------------------------------------------------------------------------
gerador_nome = ''
gerador_versao = ''
gerador_multiplo = True
cxxii_con = CXXII_Abrir_Python(CXXII_Gerador_Endereco)
cxxii_lin = list(cxxii_con)
cxxii_ini = 0
cxxii_tot = len(cxxii_lin)
while cxxii_ini < cxxii_tot and cxxii_lin[cxxii_ini] != '### CXXII\n': cxxii_ini += 1
if cxxii_ini < cxxii_tot:
fim = cxxii_ini + 1
while fim < cxxii_tot and cxxii_lin[fim] != '###\n': fim += 1
if fim < cxxii_tot: exec(''.join(cxxii_lin[(cxxii_ini+1):fim]))
cxxii_con.close()
del cxxii_con
del cxxii_lin
del cxxii_ini
del cxxii_tot
gerador_nome = gerador_nome if gerador_nome != None and len(gerador_nome) > 0 else NomeDoArquivo(argumento_g)
if gerador_versao == None: gerador_versao = 'Desconhecida'
if not type(gerador_versao) is str: gerador_versao = str(gerador_versao)
print( 'Gerador: ' + gerador_nome )
print( 'Versão: ' + gerador_versao )
#----------------------------------------------------------------------------
CXXII_Gerador_Compilado = CXXII_Compilar( CXXII_Gerador_Endereco )
CXXII_XML = CXXII_XML_Arquivos[0]
if gerador_multiplo:
for xml in CXXII_XML_Arquivos:
print(xml.endereco)
CXXII_XML = xml
CXXII_Executar( CXXII_Gerador_Compilado, globals(), locals() )
else:
CXXII_Executar( CXXII_Gerador_Compilado, globals(), locals() )
#----------------------------------------------------------------------------
except Exception as e:
if not argumento_t is None:
import traceback
traceback.print_exc()
print('Erro: ' + str(e))
#---------------------------------------------------------------------------- | joseflaviojr/cxxii | CXXII.py | Python | lgpl-3.0 | 20,202 |
# -*- coding: utf-8 -*-
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import res_country
| odoobgorg/addons | base_iso3166/models/__init__.py | Python | lgpl-3.0 | 120 |
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the Fieldsfile file loading plugin and FFHeader.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import zip
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import collections
import mock
import numpy as np
import iris
import iris.fileformats.ff as ff
import iris.fileformats.pp as pp
_MockField = collections.namedtuple('_MockField',
'lbext lblrec lbnrec raw_lbpack '
'lbuser boundary_packing')
# PP-field: LBPACK N1 values.
_UNPACKED = 0
_WGDOS = 1
_CRAY = 2
_GRIB = 3 # Not implemented.
_RLE = 4 # Not supported, deprecated FF format.
# PP-field: LBUSER(1) values.
_REAL = 1
_INTEGER = 2
_LOGICAL = 3 # Not implemented.
class TestFF_HEADER(tests.IrisTest):
def test_initialisation(self):
self.assertEqual(ff.FF_HEADER[0], ('data_set_format_version', (0,)))
self.assertEqual(ff.FF_HEADER[17], ('integer_constants', (99, 100)))
def test_size(self):
self.assertEqual(len(ff.FF_HEADER), 31)
@tests.skip_data
class TestFFHeader(tests.IrisTest):
def setUp(self):
self.filename = tests.get_data_path(('FF', 'n48_multi_field'))
self.ff_header = ff.FFHeader(self.filename)
self.valid_headers = (
'integer_constants', 'real_constants', 'level_dependent_constants',
'lookup_table', 'data'
)
self.invalid_headers = (
'row_dependent_constants', 'column_dependent_constants',
'fields_of_constants', 'extra_constants', 'temp_historyfile',
'compressed_field_index1', 'compressed_field_index2',
'compressed_field_index3'
)
def test_constructor(self):
# Test FieldsFile header attribute lookup.
self.assertEqual(self.ff_header.data_set_format_version, 20)
self.assertEqual(self.ff_header.sub_model, 1)
self.assertEqual(self.ff_header.vert_coord_type, 5)
self.assertEqual(self.ff_header.horiz_grid_type, 0)
self.assertEqual(self.ff_header.dataset_type, 3)
self.assertEqual(self.ff_header.run_identifier, 0)
self.assertEqual(self.ff_header.experiment_number, -32768)
self.assertEqual(self.ff_header.calendar, 1)
self.assertEqual(self.ff_header.grid_staggering, 3)
self.assertEqual(self.ff_header.time_type, -32768)
self.assertEqual(self.ff_header.projection_number, -32768)
self.assertEqual(self.ff_header.model_version, 802)
self.assertEqual(self.ff_header.obs_file_type, -32768)
self.assertEqual(self.ff_header.last_fieldop_type, -32768)
self.assertEqual(self.ff_header.first_validity_time,
(2011, 7, 10, 18, 0, 0, 191))
self.assertEqual(self.ff_header.last_validity_time,
(2011, 7, 10, 21, 0, 0, 191))
self.assertEqual(self.ff_header.misc_validity_time,
(2012, 4, 30, 18, 12, 13, -32768))
self.assertEqual(self.ff_header.integer_constants.shape, (46, ))
self.assertEqual(self.ff_header.real_constants.shape, (38, ))
self.assertEqual(self.ff_header.level_dependent_constants.shape,
(71, 8))
self.assertIsNone(self.ff_header.row_dependent_constants)
self.assertIsNone(self.ff_header.column_dependent_constants)
self.assertIsNone(self.ff_header.fields_of_constants)
self.assertIsNone(self.ff_header.extra_constants)
self.assertIsNone(self.ff_header.temp_historyfile)
self.assertIsNone(self.ff_header.compressed_field_index1)
self.assertIsNone(self.ff_header.compressed_field_index2)
self.assertIsNone(self.ff_header.compressed_field_index3)
self.assertEqual(self.ff_header.lookup_table, (909, 64, 5))
self.assertEqual(self.ff_header.total_prognostic_fields, 3119)
self.assertEqual(self.ff_header.data, (2049, 2961, -32768))
def test_str(self):
self.assertString(str(self.ff_header), ('FF', 'ffheader.txt'))
def test_repr(self):
target = "FFHeader('" + self.filename + "')"
self.assertEqual(repr(self.ff_header), target)
def test_shape(self):
self.assertEqual(self.ff_header.shape('data'), (2961, -32768))
@tests.skip_data
class TestFF2PP2Cube(tests.IrisTest):
def setUp(self):
self.filename = tests.get_data_path(('FF', 'n48_multi_field'))
def test_unit_pass_0(self):
# Test FieldsFile to PPFields cube load.
cube_by_name = collections.defaultdict(int)
cubes = iris.load(self.filename)
while cubes:
cube = cubes.pop(0)
standard_name = cube.standard_name
cube_by_name[standard_name] += 1
filename = '{}_{}.cml'.format(standard_name,
cube_by_name[standard_name])
self.assertCML(cube, ('FF', filename))
def test_raw_to_table_count(self):
filename = tests.get_data_path(('FF', 'n48_multi_field_table_count'))
cubes = iris.load_raw(filename)
ff_header = ff.FFHeader(filename)
table_count = ff_header.lookup_table[2]
self.assertEqual(len(cubes), table_count)
@tests.skip_data
class TestFFieee32(tests.IrisTest):
def test_iris_loading(self):
ff32_fname = tests.get_data_path(('FF', 'n48_multi_field.ieee32'))
ff64_fname = tests.get_data_path(('FF', 'n48_multi_field'))
ff32_cubes = iris.load(ff32_fname)
ff64_cubes = iris.load(ff64_fname)
for ff32, ff64 in zip(ff32_cubes, ff64_cubes):
# load the data
_, _ = ff32.data, ff64.data
self.assertEqual(ff32, ff64)
@tests.skip_data
class TestFFVariableResolutionGrid(tests.IrisTest):
def setUp(self):
self.filename = tests.get_data_path(('FF', 'n48_multi_field'))
self.ff2pp = ff.FF2PP(self.filename)
self.ff_header = self.ff2pp._ff_header
data_shape = (73, 96)
delta = np.sin(np.linspace(0, np.pi * 5, data_shape[1])) * 5
lons = np.linspace(0, 180, data_shape[1]) + delta
lons = np.vstack([lons[:-1], lons[:-1] + 0.5 * np.diff(lons)]).T
lons = np.reshape(lons, lons.shape, order='F')
delta = np.sin(np.linspace(0, np.pi * 5, data_shape[0])) * 5
lats = np.linspace(-90, 90, data_shape[0]) + delta
lats = np.vstack([lats[:-1], lats[:-1] + 0.5 * np.diff(lats)]).T
lats = np.reshape(lats, lats.shape, order='F')
self.ff_header.column_dependent_constants = lons
self.ff_header.row_dependent_constants = lats
self.U_grid_x = lons[:-1, 1]
self.V_grid_y = lats[:-1, 1]
self.P_grid_x = lons[:, 0]
self.P_grid_y = lats[:, 0]
self.orig_make_pp_field = pp.make_pp_field
def new_make_pp_field(header):
field = self.orig_make_pp_field(header)
field.stash = self.ff2pp._custom_stash
field.bdx = field.bdy = field.bmdi
return field
# Replace the pp module function with this new function;
# this gets called in PP2FF.
pp.make_pp_field = new_make_pp_field
def tearDown(self):
pp.make_pp_field = self.orig_make_pp_field
def _check_stash(self, stash, x_coord, y_coord):
self.ff2pp._custom_stash = stash
field = next(iter(self.ff2pp))
self.assertArrayEqual(x_coord, field.x, ('x_coord was incorrect for '
'stash {}'.format(stash)))
self.assertArrayEqual(y_coord, field.y, ('y_coord was incorrect for '
'stash {}'.format(stash)))
def test_p(self):
self._check_stash('m01s00i001', self.P_grid_x, self.P_grid_y)
def test_u(self):
self._check_stash('m01s00i002', self.U_grid_x, self.P_grid_y)
def test_v(self):
self._check_stash('m01s00i003', self.P_grid_x, self.V_grid_y)
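# The TestFFPayload cases below pin down the FF2PP._payload arithmetic as exercised
# here: for unpacked data the depth is (lblrec - lbext) words of 8 bytes, for CRAY
# 32-bit packing the same word count at 4 bytes, and for WGDOS packing lbnrec * 8 - 4
# bytes irrespective of lbext (figures inferred from the expected depths in the tests).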
class TestFFPayload(tests.IrisTest):
def _test_payload(self, mock_field, expected_depth, expected_type):
with mock.patch('iris.fileformats.ff.FFHeader') as mock_header:
mock_header.return_value = None
ff2pp = ff.FF2PP('Not real')
data_depth, data_type = ff2pp._payload(mock_field)
self.assertEqual(data_depth, expected_depth)
self.assertEqual(data_type, expected_type)
def test_payload_unpacked_real(self):
mock_field = _MockField(lbext=0, lblrec=100, lbnrec=-1,
raw_lbpack=_UNPACKED,
lbuser=[_REAL], boundary_packing=None)
expected_type = ff._LBUSER_DTYPE_LOOKUP[_REAL].format(word_depth=8)
expected_type = np.dtype(expected_type)
self._test_payload(mock_field, 800, expected_type)
def test_payload_unpacked_real_ext(self):
mock_field = _MockField(lbext=50, lblrec=100, lbnrec=-1,
raw_lbpack=_UNPACKED,
lbuser=[_REAL], boundary_packing=None)
expected_type = ff._LBUSER_DTYPE_LOOKUP[_REAL].format(word_depth=8)
expected_type = np.dtype(expected_type)
self._test_payload(mock_field, 400, expected_type)
def test_payload_unpacked_integer(self):
mock_field = _MockField(lbext=0, lblrec=200, lbnrec=-1,
raw_lbpack=_UNPACKED,
lbuser=[_INTEGER], boundary_packing=None)
expected_type = ff._LBUSER_DTYPE_LOOKUP[_INTEGER].format(word_depth=8)
expected_type = np.dtype(expected_type)
self._test_payload(mock_field, 1600, expected_type)
def test_payload_unpacked_integer_ext(self):
mock_field = _MockField(lbext=100, lblrec=200, lbnrec=-1,
raw_lbpack=_UNPACKED,
lbuser=[_INTEGER], boundary_packing=None)
expected_type = ff._LBUSER_DTYPE_LOOKUP[_INTEGER].format(word_depth=8)
expected_type = np.dtype(expected_type)
self._test_payload(mock_field, 800, expected_type)
def test_payload_wgdos_real(self):
mock_field = _MockField(lbext=0, lblrec=-1, lbnrec=100,
raw_lbpack=_WGDOS,
lbuser=[_REAL], boundary_packing=None)
self._test_payload(mock_field, 796, pp.LBUSER_DTYPE_LOOKUP[_REAL])
def test_payload_wgdos_real_ext(self):
mock_field = _MockField(lbext=50, lblrec=-1, lbnrec=100,
raw_lbpack=_WGDOS,
lbuser=[_REAL], boundary_packing=None)
self._test_payload(mock_field, 796, pp.LBUSER_DTYPE_LOOKUP[_REAL])
def test_payload_wgdos_integer(self):
mock_field = _MockField(lbext=0, lblrec=-1, lbnrec=200,
raw_lbpack=_WGDOS,
lbuser=[_INTEGER], boundary_packing=None)
self._test_payload(mock_field, 1596, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])
def test_payload_wgdos_integer_ext(self):
mock_field = _MockField(lbext=100, lblrec=-1, lbnrec=200,
raw_lbpack=_WGDOS,
lbuser=[_INTEGER], boundary_packing=None)
self._test_payload(mock_field, 1596, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])
def test_payload_cray_real(self):
mock_field = _MockField(lbext=0, lblrec=100, lbnrec=-1,
raw_lbpack=_CRAY,
lbuser=[_REAL], boundary_packing=None)
self._test_payload(mock_field, 400, pp.LBUSER_DTYPE_LOOKUP[_REAL])
def test_payload_cray_real_ext(self):
mock_field = _MockField(lbext=50, lblrec=100, lbnrec=-1,
raw_lbpack=_CRAY,
lbuser=[_REAL], boundary_packing=None)
self._test_payload(mock_field, 200, pp.LBUSER_DTYPE_LOOKUP[_REAL])
def test_payload_cray_integer(self):
mock_field = _MockField(lbext=0, lblrec=200, lbnrec=-1,
raw_lbpack=_CRAY,
lbuser=[_INTEGER], boundary_packing=None)
self._test_payload(mock_field, 800, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])
def test_payload_cray_integer_ext(self):
mock_field = _MockField(lbext=100, lblrec=200, lbnrec=-1,
raw_lbpack=_CRAY,
lbuser=[_INTEGER], boundary_packing=None)
self._test_payload(mock_field, 400, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])
if __name__ == '__main__':
tests.main()
| Jozhogg/iris | lib/iris/tests/test_ff.py | Python | lgpl-3.0 | 13,420 |
"""
Testing for the tree module (sklearn.tree).
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from sklearn import tree
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification_toy():
"""Check classification on a toy dataset."""
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# With subsampling
clf = tree.DecisionTreeClassifier(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
def test_regression_toy():
"""Check regression on a toy dataset."""
clf = tree.DecisionTreeRegressor()
clf.fit(X, y)
assert_almost_equal(clf.predict(T), true_result)
# With subsampling
clf = tree.DecisionTreeRegressor(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(clf.predict(T), true_result)
def test_xor():
"""Check on a XOR problem"""
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0)
clf = tree.DecisionTreeClassifier(max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0)
clf = tree.ExtraTreeClassifier()
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0)
clf = tree.ExtraTreeClassifier(max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0)
def test_graphviz_toy():
"""Check correctness of graphviz output on a toy dataset."""
clf = tree.DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
from StringIO import StringIO
# test export code
out = StringIO()
tree.export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
tree_toy = StringIO("digraph Tree {\n"
"0 [label=\"X[0] <= 0.0000\\nerror = 0.5"
"\\nsamples = 6\\nvalue = [ 3. 3.]\", shape=\"box\"] ;\n"
"1 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 3. 0.]\", shape=\"box\"] ;\n"
"0 -> 1 ;\n"
"2 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 0. 3.]\", shape=\"box\"] ;\n"
"0 -> 2 ;\n"
"}")
contents2 = tree_toy.getvalue()
assert contents1 == contents2, \
"graphviz output test failed\n: %s != %s" % (contents1, contents2)
# test with feature_names
out = StringIO()
out = tree.export_graphviz(clf, out_file=out,
feature_names=["feature1", ""])
contents1 = out.getvalue()
tree_toy = StringIO("digraph Tree {\n"
"0 [label=\"feature1 <= 0.0000\\nerror = 0.5"
"\\nsamples = 6\\nvalue = [ 3. 3.]\", shape=\"box\"] ;\n"
"1 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 3. 0.]\", shape=\"box\"] ;\n"
"0 -> 1 ;\n"
"2 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 0. 3.]\", shape=\"box\"] ;\n"
"0 -> 2 ;\n"
"}")
contents2 = tree_toy.getvalue()
assert contents1 == contents2, \
"graphviz output test failed\n: %s != %s" % (contents1, contents2)
# test improperly formed feature_names
out = StringIO()
assert_raises(IndexError, tree.export_graphviz,
clf, out, feature_names=[])
def test_iris():
"""Check consistency on dataset iris."""
for c in ('gini',
'entropy'):
clf = tree.DecisionTreeClassifier(criterion=c).fit(iris.data,
iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with criterion " + c + \
" and score = " + str(score)
clf = tree.DecisionTreeClassifier(criterion=c,
max_features=2,
random_state=1).fit(iris.data,
iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.5, "Failed with criterion " + c + \
" and score = " + str(score)
def test_boston():
"""Check consistency on dataset boston house prices."""
for c in ('mse',):
clf = tree.DecisionTreeRegressor(criterion=c).fit(boston.data,
boston.target)
score = np.mean(np.power(clf.predict(boston.data) - boston.target, 2))
assert score < 1, "Failed with criterion " + c + \
" and score = " + str(score)
clf = tree.DecisionTreeRegressor(criterion=c,
max_features=6,
random_state=1).fit(boston.data,
boston.target)
#using fewer features reduces the learning ability of this tree,
# but reduces training time.
score = np.mean(np.power(clf.predict(boston.data) - boston.target, 2))
assert score < 2, "Failed with criterion " + c + \
" and score = " + str(score)
def test_probability():
"""Predict probabilities using DecisionTreeClassifier."""
clf = tree.DecisionTreeClassifier(max_depth=1, max_features=1,
random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_arrayrepr():
"""Check the array representation."""
# Check resize
clf = tree.DecisionTreeRegressor(max_depth=None)
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
clf.fit(X, y)
def test_pure_set():
"""Check when y is pure."""
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
clf = tree.DecisionTreeClassifier().fit(X, y)
assert_array_equal(clf.predict(X), y)
clf = tree.DecisionTreeRegressor().fit(X, y)
assert_array_equal(clf.predict(X), y)
def test_numerical_stability():
"""Check numerical stability."""
old_settings = np.geterr()
np.seterr(all="raise")
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
dt = tree.DecisionTreeRegressor()
dt.fit(X, y)
dt.fit(X, -y)
dt.fit(-X, y)
dt.fit(-X, -y)
np.seterr(**old_settings)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
clf = tree.DecisionTreeClassifier(compute_importances=True)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
assert 0 < X_new.shape[1] < X.shape[1]
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
assert_true(clf.feature_importances_ is None)
def test_error():
"""Test that it gives proper exception on deficient input."""
# Invalid values for parameters
assert_raises(ValueError,
tree.DecisionTreeClassifier(min_samples_leaf=-1).fit,
X, y)
assert_raises(ValueError,
tree.DecisionTreeClassifier(max_depth=-1).fit,
X, y)
assert_raises(ValueError,
tree.DecisionTreeClassifier(min_density=2.0).fit,
X, y)
assert_raises(ValueError,
tree.DecisionTreeClassifier(max_features=42).fit,
X, y)
# Wrong dimensions
clf = tree.DecisionTreeClassifier()
y2 = y[:-1]
assert_raises(ValueError, clf.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
clf = tree.DecisionTreeClassifier()
clf.fit(Xf, y)
assert_array_equal(clf.predict(T), true_result)
# predict before fitting
clf = tree.DecisionTreeClassifier()
assert_raises(Exception, clf.predict, T)
# predict on vector with different dims
clf.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, clf.predict, t[:, 1:])
# use values of max_features that are invalid
clf = tree.DecisionTreeClassifier(max_features=10)
assert_raises(ValueError, clf.fit, X, y)
clf = tree.DecisionTreeClassifier(max_features=-1)
assert_raises(ValueError, clf.fit, X, y)
clf = tree.DecisionTreeClassifier(max_features="foobar")
assert_raises(ValueError, clf.fit, X, y)
tree.DecisionTreeClassifier(max_features="auto").fit(X, y)
tree.DecisionTreeClassifier(max_features="sqrt").fit(X, y)
tree.DecisionTreeClassifier(max_features="log2").fit(X, y)
tree.DecisionTreeClassifier(max_features=None).fit(X, y)
# predict before fit
clf = tree.DecisionTreeClassifier()
assert_raises(Exception, clf.predict_proba, X)
clf.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, clf.predict_proba, X2)
# wrong sample shape
Xt = np.array(X).T
clf = tree.DecisionTreeClassifier()
clf.fit(np.dot(X, Xt), y)
assert_raises(ValueError, clf.predict, X)
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
# wrong length of sample mask
clf = tree.DecisionTreeClassifier()
sample_mask = np.array([1])
assert_raises(ValueError, clf.fit, X, y, sample_mask=sample_mask)
# wrong length of X_argsorted
clf = tree.DecisionTreeClassifier()
X_argsorted = np.array([1])
assert_raises(ValueError, clf.fit, X, y, X_argsorted=X_argsorted)
def test_min_samples_leaf():
"""Test if leaves contain more than leaf_count training examples"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
for tree_class in [tree.DecisionTreeClassifier, tree.ExtraTreeClassifier]:
clf = tree_class(min_samples_leaf=5).fit(X, y)
out = clf.tree_.apply(X)
node_counts = np.bincount(out)
leaf_count = node_counts[node_counts != 0] # drop inner nodes
assert np.min(leaf_count) >= 5
def test_pickle():
import pickle
# classification
obj = tree.DecisionTreeClassifier()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert score == score2, "Failed to generate same score " + \
" after pickling (classification) "
# regression
obj = tree.DecisionTreeRegressor()
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert score == score2, "Failed to generate same score " + \
" after pickling (regression) "
def test_multioutput():
"""Check estimators on multi-output problems."""
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
clf = tree.DecisionTreeClassifier()
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
clf = tree.DecisionTreeRegressor()
y_hat = clf.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_sample_mask():
"""Test sample_mask argument. """
# test list sample_mask
clf = tree.DecisionTreeClassifier()
sample_mask = [1] * len(X)
clf.fit(X, y, sample_mask=sample_mask)
assert_array_equal(clf.predict(T), true_result)
# test different dtype
clf = tree.DecisionTreeClassifier()
sample_mask = np.ones((len(X),), dtype=np.int32)
clf.fit(X, y, sample_mask=sample_mask)
assert_array_equal(clf.predict(T), true_result)
def test_X_argsorted():
"""Test X_argsorted argument. """
# test X_argsorted with different layout and dtype
clf = tree.DecisionTreeClassifier()
X_argsorted = np.argsort(np.array(X).T, axis=1).T
clf.fit(X, y, X_argsorted=X_argsorted)
assert_array_equal(clf.predict(T), true_result)
def test_classes_shape():
"""Test that n_classes_ and classes_ have proper shape."""
# Classification, single output
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = tree.DecisionTreeClassifier()
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_equal(clf.n_classes_, [2, 2])
assert_equal(clf.classes_, [[-1, 1], [-2, 2]])
if __name__ == "__main__":
import nose
nose.runmodule()
| seckcoder/lang-learn | python/sklearn/sklearn/tree/tests/test_tree.py | Python | unlicense | 15,491 |
def bytes_to_long(foo):
return 0
def long_to_bytes(foo):
return '\0'
| jerith/fake-plastic-pycrypto | Crypto/Util/number.py | Python | unlicense | 79 |
n = int(input())
st = [(-1, -2)]
s = 0
for i, h in enumerate(map(int, input().split() + [' -1'])):
if h > st[-1][1]:
st.append((i, h))
else:
while st[-1][1] >= h:
r = st.pop()
s = max(s, (i - r[0]) * r[1])
st.append((r[0], h))
print(s)
| altg0x0/info_tasks | 18/k.py | Python | unlicense | 292 |
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.core.exceptions import ObjectDoesNotExist
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseRedirect
from django.db import transaction
from django_redis import get_redis_connection
from users.models import PastebinUser
from users.forms import RegisterForm, LoginForm, ChangePreferencesForm, ChangePasswordForm, VerifyPasswordForm
from users.models import Favorite, SiteSettings
from pastes.models import Paste
from pastebin.util import Paginator
import math
def register_view(request):
"""
Register a new user
"""
# Check if the user is authenticated
if request.user.is_authenticated():
# User is already authenticated
return render(request, 'users/register/already_logged_in.html')
else:
register_form = RegisterForm(request.POST or None)
if request.method == 'POST': # Form data was submitted
if register_form.is_valid(): # Form data is valid
# Create the user
with transaction.atomic():
user = User.objects.create_user(register_form.cleaned_data['username'],
"N/A", # we don't deal with email addresses
register_form.cleaned_data['password'])
PastebinUser.create_user(user)
# TODO: Show a different message if the registration fails
return render(request, 'users/register/register_success.html')
# Show the registration page
return render(request, "users/register/register.html", { "form": register_form })
def login_view(request):
"""
Log the user in
"""
# Check if the user is authenticated
if request.user.is_authenticated():
# User is authenticated
return render(request, "users/login/logged_in.html")
else:
login_form = LoginForm(request.POST or None)
# User is NOT authenticated
if request.method == 'POST': # Form data was submitted
if login_form.is_valid(): # Form data is valid
user = authenticate(username = login_form.cleaned_data['username'],
password = login_form.cleaned_data['password'])
if user is not None and user.is_active:
login(request, user)
return render(request, "users/login/logged_in.html")
else:
# Couldn't authenticate, either the username or password is wrong
error = "User doesn't exist or the password is incorrect."
login_form._errors['password'] = login_form.error_class([error])
# Show the login form
return render(request, "users/login/login.html", { "form": login_form })
def logout_view(request):
"""
Logout the user and show the logout page
"""
if request.user.is_authenticated():
logout(request)
return render(request, 'users/logout/logged_out.html')
def profile(request, username, tab="home", page=1):
"""
Show a publicly visible profile page
"""
page = int(page)
try:
profile_user = cache.get("user:%s" % username)
if profile_user == None:
profile_user = User.objects.get(username=username)
cache.set("user:%s" % username, profile_user)
elif profile_user == False:
return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
except ObjectDoesNotExist:
cache.set("user:%s" % username, False)
return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
# Get user's settings
profile_settings = cache.get("site_settings:%s" % username)
if profile_settings == None:
try:
profile_settings = SiteSettings.objects.get(user=profile_user)
except ObjectDoesNotExist:
profile_settings = SiteSettings(user=profile_user)
profile_settings.save()
cache.set("site_settings:%s" % username, profile_settings)
if not profile_user.is_active:
return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
if request.user != profile_user:
total_paste_count = cache.get("user_public_paste_count:%s" % profile_user.username)
else:
total_paste_count = cache.get("user_paste_count:%s" % profile_user.username)
# If user is viewing his own profile, also include hidden pastes
if total_paste_count == None and request.user != profile_user:
total_paste_count = Paste.objects.filter(user=profile_user, removed=Paste.NO_REMOVAL).filter(hidden=False).count()
cache.set("user_public_paste_count:%s" % profile_user.username, total_paste_count)
elif total_paste_count == None and request.user == profile_user:
total_paste_count = Paste.objects.filter(user=profile_user, removed=Paste.NO_REMOVAL).count()
cache.set("user_paste_count:%s" % profile_user.username, total_paste_count)
total_favorite_count = cache.get("user_favorite_count:%s" % profile_user.username)
if total_favorite_count == None:
total_favorite_count = Favorite.objects.filter(user=profile_user).count()
cache.set("user_favorite_count:%s" % profile_user.username, total_favorite_count)
args = {"profile_user": profile_user,
"profile_settings": profile_settings,
"current_page": page,
"tab": tab,
"total_favorite_count": total_favorite_count,
"total_paste_count": total_paste_count}
if tab == "home":
return home(request, args)
elif tab == "pastes":
return pastes(request, profile_user, args, page)
elif tab == "favorites":
return favorites(request, profile_user, args, page)
# The remaining pages require authentication, so redirect through settings()
else:
return settings(request, profile_user, args, tab)
def settings(request, username, args={}, tab="change_password"):
"""
Show a page which allows the user to change his settings
"""
if not request.user.is_authenticated():
return render(request, "users/settings/settings_error.html", {"reason": "not_logged_in"})
profile_user = User.objects.get(username=username)
if request.user.id != profile_user.id:
return render(request, "users/settings/settings_error.html", {"reason": "incorrect_user"})
if tab == "change_preferences":
return change_preferences(request, args)
if tab == "change_password":
return change_password(request, args)
elif tab == "delete_account":
return delete_account(request, args)
def home(request, args):
"""
Display user profile's home with the most recent pastes and favorites
"""
# Get favorites only if user has made them public
if args["profile_settings"].public_favorites or request.user == args["profile_user"]:
args["favorites"] = cache.get("profile_favorites:%s" % args["profile_user"].username)
if args["favorites"] == None:
args["favorites"] = Favorite.objects.filter(user=args["profile_user"]).order_by('-added').select_related('paste')[:10]
cache.set("profile_favorites:%s" % args["profile_user"].username, args["favorites"])
if request.user == args["profile_user"]:
args["pastes"] = cache.get("profile_pastes:%s" % args["profile_user"].username)
if args["pastes"] == None:
args["pastes"] = Paste.objects.get_pastes(args["profile_user"], include_hidden=True, count=10)
cache.set("profile_pastes:%s" % args["profile_user"].username, args["pastes"])
else:
args["pastes"] = cache.get("profile_public_pastes:%s" % args["profile_user"].username)
if args["pastes"] == None:
args["pastes"] = Paste.objects.get_pastes(args["profile_user"], include_hidden=False, count=10)
cache.set("profile_public_pastes:%s" % args["profile_user"].username, args["pastes"])
return render(request, "users/profile/home/home.html", args)
def pastes(request, user, args, page=1):
"""
Show all of user's pastes
"""
PASTES_PER_PAGE = 15
args["total_pages"] = int(math.ceil(float(args["total_paste_count"]) / float(PASTES_PER_PAGE)))
if page > args["total_pages"]:
page = max(int(args["total_pages"]), 1)
offset = (page-1) * PASTES_PER_PAGE
if request.user == user:
args["pastes"] = cache.get("user_pastes:%s:%s" % (user.username, page))
if args["pastes"] == None:
args["pastes"] = Paste.objects.get_pastes(user, count=PASTES_PER_PAGE, include_hidden=True, offset=offset)
cache.set("user_pastes:%s:%s" % (user.username, page), args["pastes"])
else:
args["pastes"] = cache.get("user_public_pastes:%s:%s" % (user.username, page))
if args["pastes"] == None:
args["pastes"] = Paste.objects.get_pastes(user, count=PASTES_PER_PAGE, include_hidden=False, offset=offset)
cache.set("user_public_pastes:%s:%s" % (user.username, page), args["pastes"])
args["pages"] = Paginator.get_pages(page, PASTES_PER_PAGE, args["total_paste_count"])
args["current_page"] = page
return render(request, "users/profile/pastes/pastes.html", args)
def favorites(request, user, args, page=1):
"""
Show all of user's favorites
"""
FAVORITES_PER_PAGE = 15
if not args["profile_settings"].public_favorites and request.user != args["profile_user"]:
        # Don't show favorites to other users if the user hasn't made them public
return render(request, "users/profile/favorites/favorites_hidden.html", args)
args["total_pages"] = int(math.ceil(float(args["total_favorite_count"]) / float(FAVORITES_PER_PAGE)))
if page > args["total_pages"]:
page = max(int(args["total_pages"]), 1)
start = (page-1) * FAVORITES_PER_PAGE
end = start + FAVORITES_PER_PAGE
args["favorites"] = cache.get("user_favorites:%s:%s" % (user.username, page))
if args["favorites"] == None:
args["favorites"] = Favorite.objects.filter(user=user).select_related("paste")[start:end]
cache.set("user_favorites:%s:%s" % (user.username, page), args["favorites"])
args["pages"] = Paginator.get_pages(page, FAVORITES_PER_PAGE, args["total_favorite_count"])
args["current_page"] = page
return render(request, "users/profile/favorites/favorites.html", args)
def remove_favorite(request):
"""
Remove a favorite and redirect the user back to the favorite listing
"""
if "favorite_id" not in request.POST or not int(request.POST["favorite_id"]):
return HttpResponse("Favorite ID was not valid.", status=422)
if "page" not in request.POST or not int(request.POST["page"]):
return HttpResponse("Page was not valid.", status=422)
favorite_id = int(request.POST["favorite_id"])
page = int(request.POST["page"])
favorite = Favorite.objects.get(id=favorite_id)
if not request.user.is_authenticated():
return HttpResponse("You are not authenticated", status=422)
if favorite.user != request.user:
return HttpResponse("You can't delete someone else's favorites.", status=422)
favorite.delete()
cache.delete("profile_favorites:%s" % request.user.username)
cache.delete("user_favorite_count:%s" % request.user.username)
return HttpResponseRedirect(reverse("users:favorites", kwargs={"username": request.user.username,
"page": page}))
def change_preferences(request, args):
"""
Change various profile-related preferences
"""
site_settings = SiteSettings.objects.get(user=request.user)
form = ChangePreferencesForm(request.POST or None, initial={"public_favorites": site_settings.public_favorites})
preferences_changed = False
if form.is_valid():
cleaned_data = form.cleaned_data
site_settings.public_favorites = cleaned_data["public_favorites"]
site_settings.save()
cache.set("site_settings:%s" % request.user.username, site_settings)
preferences_changed = True
args["form"] = form
args["preferences_changed"] = preferences_changed
return render(request, "users/settings/change_preferences/change_preferences.html", args)
def change_password(request, args):
"""
Change the user's password
"""
form = ChangePasswordForm(request.POST or None, user=request.user)
password_changed = False
if form.is_valid():
cleaned_data = form.cleaned_data
request.user.set_password(cleaned_data["new_password"])
request.user.save()
# Session auth hash needs to be updated after changing the password
# or the user will be logged out
update_session_auth_hash(request, request.user)
password_changed = True
args["form"] = form
args["password_changed"] = password_changed
return render(request, "users/settings/change_password/change_password.html", args)
def delete_account(request, args):
"""
Delete the user's account
"""
form = VerifyPasswordForm(request.POST or None, user=request.user)
if form.is_valid():
PastebinUser.delete_user(request.user)
logout(request)
return render(request, "users/settings/delete_account/account_deleted.html")
args["form"] = form
return render(request, "users/settings/delete_account/delete_account.html", args) | Matoking/pastebin-django | users/views.py | Python | unlicense | 14,332 |
#!/usr/bin/python
# NOTES
# This script will count the number of tweets within an output.txt file
import re
output = open("output.txt", "r");
regex = re.compile("\n\n");
newlinenewline = regex.findall(output.read());
print len(newlinenewline); | joekneebone/Most-Common-Words-on-Twitter | TweetCounter/count.py | Python | unlicense | 250 |
import h5py # HDF5 support
import os
import glob
import numpy as n
from scipy.interpolate import interp1d
import sys
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
#status = 'create'
status = 'update'
path_to_lc = sys.argv[1]
#path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_cluster_remaped_position_L3.hdf5'
f = h5py.File(path_to_lc, 'r+')
is_gal = (f['/sky_position/selection'].value)&(f['/sky_position/redshift_R'].value<3.)
z = f['/sky_position/redshift_S'].value[is_gal]
lx = f['/cluster_data/log_LceX_05_24'].value[is_gal]
percent_observed = 1.
lx_absorbed_05_24 = n.log10(10**lx * percent_observed)
d_L = cosmoMD.luminosity_distance(z)
dl_cm = (d_L.to(u.cm)).value
adjusting_factor = 0.35 # accounts for absorption for now !
fx_05_24 = 10**(lx_absorbed_05_24-adjusting_factor) / (4 * n.pi * dl_cm**2.)
fx_05_24_out = n.ones_like(f['/sky_position/redshift_S'].value)*-9999.
fx_05_24_out[is_gal] = fx_05_24
if status == 'create':
f['/cluster_data'].create_dataset('rxay_flux_05_24', data = fx_05_24_out )
if status == 'update':
f['/cluster_data/rxay_flux_05_24'][:] = fx_05_24_out
f.close()
| JohanComparat/pyEmerge | bin_cluster/lc_add_clusters.py | Python | unlicense | 1,223 |
import operator
def pozicijaSprite(broj, x_velicina):
    # returns the pixel at which the sprite is located
    pixel = broj * (x_velicina + 1) # 1 is an empty row between sprites
return(pixel)
#spriteSlova = ["A", "B", "C", "D", "E", "F", "G", "H", "i", "s", "e"]
spriteSlova = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "s", ",", "'", "1", "2", "4", "8", "6", "3", ".", "5", "7", "9", "0", "M", "B", "I", "N", "S", "E", "R", "T", " ", "-", "V","U" ,"A", "L", "O", "D", ":", "m", "j", "n", "u", "C", "H", "k", "l", "o", "p", "r", "t", "v", "z", "K", "P", "%", "/"]
def pixel2Ton(pixel):
rezolucija = 90
indent = -12 #extra pixeli
height = 3
broj = ( rezolucija - pixel - indent ) / height
return(int(broj))
predikati = {
0 : 0,
1 : -1,
2 : 1,
3 : 0
}
kljucevi = {
0 : ("d", ",,"),
1 : ("e", ",,"),
2 : ("f", ",,"),
3 : ("g", ",,"),
4 : ("a", ",,"),
5 : ("h", ",,"),
6 : ("c", ","),
7 : ("d", ","),
8 : ("e", ","),
9 : ("f", ","),
10 : ("g", ","),
11 : ("a", ","),
12 : ("h", ","),
13 : ("c", ""),
14 : ("d", ""),
15 : ("e", ""),
16 : ("f", ""),
17 : ("g", ""),
18 : ("a", ""),
19 : ("h", ""),
20 : ("c", "'"),
21 : ("d", "'"),
22 : ("e", "'"),
23 : ("f", "'"),
24 : ("g", "'"),
25 : ("a", "'"),
26 : ("h", "'"),
27 : ("c", "''"),
28 : ("d", "''"),
29 : ("e", "''"),
30 : ("f", "''"),
31 : ("g", "''"),
32 : ("a", "''"),
33 : ("h", "''"),
34 : ("c", "'''"),
35 : ("d", "'''"),
36 : ("e", "'''"),
37 : ("f", "'''"),
38 : ("g", "'''"),
39 : ("a", "'''"),
40 : ("h", "'''")
}
def removeLily(slovo):
return(slovo.replace(',', '').replace('\'', '').upper())
def slovoPozicija(slovo):
for i in [i for i,x in enumerate(spriteSlova) if x == slovo]:
return(i)
rijecnikNotnihVrijednosti = {
0 : "16",
1 : "8",
2 : "8.",
3 : "4",
4 : "416",
5 : "4.",
6 : "4.16",
7 : "2",
8 : "216",
9 : "28",
10 : "28.",
11 : "2.",
12 : "2.16",
13 : "2.8",
14 : "2.8.",
15 : "1"
}
def pixel2Pozicija(pixel):
rezolucija = 90
    indent = 19 # extra pixels
width = 6
broj = ( pixel - indent ) / width
return(int(broj))
def pixel2Trajanje(pixel):
indent = 4
width = 6
broj = ( pixel - indent ) / width
return(int(broj))
def ton2Pixel(ton):
rezolucija = 90
indent = -12
height = 3
pixel = rezolucija - indent - ( ton * height )
return(pixel)
def pozicija2Pixel(pozicija):
rezolucija = 90
    indent = 19 # extra pixels
width = 6
pixel = pozicija * width + indent
return(pixel)
def trajanje2Pixel(trajanje):
indent = 4
width = 6
pixel = trajanje * width + indent
return(pixel)
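# A small, hypothetical sanity check (not part of the original module and never
# called by it): the pixel<->tone, pixel<->position and pixel<->duration helpers
# above are exact inverses of each other, so a round trip returns the input.
def _example_conversion_roundtrip():
    assert pixel2Ton(ton2Pixel(20)) == 20
    assert pixel2Pozicija(pozicija2Pixel(8)) == 8
    assert pixel2Trajanje(trajanje2Pixel(3)) == 3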
class dodaj_notu(object):
def __init__(self, pozicija, ton, trajanje, predikat):
self.pozicija=pozicija
self.ton=ton
self.trajanje=trajanje
self.predikat=predikat
self.ligatura=False
class add_chord(object):
def __init__(self, pozicija, ton, trajanje, predikat):
self.pozicija=pozicija
self.ton=ton
self.trajanje=trajanje
self.predikat=predikat
self.ligatura=False
class add_markup(object):
def __init__(self, pozicija, ton, trajanje, predikat):
self.pozicija=pozicija
self.ton=ton
self.trajanje=trajanje
self.predikat=predikat
self.ligatura=False
class cursor(object):
def __init__(self, pozicija, ton, trajanje):
self.pozicija = pozicija
self.ton = ton
self.trajanje = trajanje
self.sprite = 0
self.bg_scroll_x = 0
self.bg_scroll_y = 0
        self.bg_scroll_x_offset = 0 # used for cursor follow effect
        self.bg_scroll_y_offset = 0 # used for cursor follow effect
        self.apsolute_x = 0 # used for cursor follow effect
        self.apsolute_y = 0 # used for cursor follow effect
def checkXColision(nota, cursorLeft, trajanje):
    if ( nota.pozicija == cursorLeft):
        print("collision at the start of the note with CL")
        return(True)
    elif ( cursorLeft > nota.pozicija ) & ( cursorLeft < ( nota.pozicija + nota.trajanje )):
        print("collision in the middle of the note with CL")
        return(True)
    elif ( cursorLeft == ( nota.pozicija + nota.trajanje )):
        print("collision at the end of the note with CL")
        return(True)
    elif ( nota.pozicija == ( cursorLeft + trajanje)):
        print("collision at the start of the note with CR")
        return(True)
    elif ( ( cursorLeft + trajanje ) > nota.pozicija ) & ( ( cursorLeft + trajanje ) < ( nota.pozicija + nota.trajanje )):
        print("collision in the middle of the note with CR")
        return(True)
    elif ( ( cursorLeft + trajanje ) == ( nota.pozicija + nota.trajanje )):
        print("collision at the end of the note with CR")
        return(True)
    elif ( ( cursorLeft < nota.pozicija ) & ( ( cursorLeft + trajanje ) > (nota.pozicija + nota.trajanje ))):
        print("collision of the note inside the Cursor")
        return(True)
    else:
        return(False)
# sort a list of class instances by an attribute:
#lista.sort(key=operator.attrgetter('broj'))
def findNote(nota, cursorLeft, trajanje):
    if ( nota.pozicija == cursorLeft):
        print("at the start of the note with CL")
        return(1)
    elif ( cursorLeft > nota.pozicija ) & ( cursorLeft < ( nota.pozicija + nota.trajanje )):
        print("in the middle of the note with CL")
        return(2)
    elif ( cursorLeft == ( nota.pozicija + nota.trajanje )):
        print("at the end of the note with CL")
        return(3)
    elif ( nota.pozicija == ( cursorLeft + trajanje)):
        print("at the start of the note with CR")
        return(4)
    elif ( ( cursorLeft + trajanje ) > nota.pozicija ) & ( ( cursorLeft + trajanje ) < ( nota.pozicija + nota.trajanje )):
        print("in the middle of the note with CR")
        return(5)
    elif ( ( cursorLeft + trajanje ) == ( nota.pozicija + nota.trajanje )):
        print("at the end of the note with CR")
        return(6)
    elif ( ( cursorLeft < nota.pozicija ) & ( ( cursorLeft + trajanje ) > (nota.pozicija + nota.trajanje ))):
        print("note inside the Cursor")
        return(7)
    else:
        return(False)
letter2MidiNumberPrefix = {
"c" : "0",
"d" : "2",
"e" : "4",
"f" : "5",
"g" : "7",
"a" : "9",
"h" : "11",
}
letter2MidiOctave = {
",," : "24",
"," : "36",
"" : "48",
"'" : "60",
"''" : "72",
"'''" : "84",
}
predikat2Midi = {
0 : 0,
1 : 1,
2 : -1,
}
def nota2MidiNumber(nota):
return(int(letter2MidiNumberPrefix[kljucevi[nota.ton][0]]) + int(letter2MidiOctave[kljucevi[nota.ton][1]]) + int(predikat2Midi[nota.predikat]))
def get_git_revision_short_hash():
import subprocess
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])
| bebox/lhp | source/lhpFunctions.py | Python | unlicense | 6,564 |
# python3
"""
Mastermind without kivy - by Luis
merciless edited by hans
"""
import random
import re
class G():
valid_chars = '123456'
secret_len = 5
solved = '+' * secret_len
regex_str = "^[{0}]{{{1},{1}}}$".format(valid_chars, secret_len)
valid_input = re.compile(regex_str) # regular expression for user input
def main():
secret = answer_generator()
print('Enter your guess of {} of these symbols: ({})'
.format(G.secret_len, G.valid_chars))
while True:
user_seq = user_guess()
output = handle_game(secret, user_seq)
result_msg = ('{} -> {}')
print(result_msg.format(user_seq, output))
if output == G.solved:
break
print('You have found the answer! Goodbye!')
def handle_game(answer, guess):
answer = list(answer) # no need to str() or to assign a new name
guess = list(guess)
output = ''
for i, ch in enumerate(guess):
if ch == answer[i]:
# eliminate hits from both lists, but leave position untouched
guess[i] = '°' # any char which is not in valid_chars
answer[i] = '^'
output += '+'
for ch in guess:
if ch in answer:
# remove hit from answer, position is no longer important
answer.remove(ch)
output += '-'
return output
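# Illustrative example (an assumption for documentation, not part of the game
# flow): with secret "12345" and guess "15243", positions 0 and 3 are exact
# hits ('+') and the symbols 5, 2 and 3 are present but misplaced ('-'), so
# handle_game returns '++---'. Kept as an uncalled helper.
def _example_handle_game():
    assert handle_game("12345", "15243") == "++---"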
def user_guess():
while True:
response = input() # no argument needed, default is ''
if G.valid_input.match(response):
return response
print("wrong input...")
def answer_generator(): # Creates random sequence of n characters
seq = ''
for _ in range(G.secret_len): # '_': we dont care for the value
seq += random.choice(G.valid_chars) # valid_chars string is iterable
return seq
if __name__ == '__main__':
main()
| hans-boden/pyws-fablab-lisbon | contribs/luis_mp/mm_proposal_wo_kivi.py | Python | unlicense | 1,919 |
from __future__ import print_function
import httplib2
import io
import os
import sys
import time
import dateutil.parser
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from apiclient.http import MediaIoBaseDownload
import pprint
#Change these to the day of the osiris infestation
YEAR_OF_INFECTION=2017
MONTH_OF_INFECTION=01
DAY_OF_INFECTION=01
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/drive'
#YOU NEED TO SET UP AN APPLICATION ON GOOGLE AND GENERATE A KEY AND CREATE THIS FILE
CLIENT_SECRET_FILE = 'revert_osiris.json'
APPLICATION_NAME = 'Revert Osiris'
#copy pasta form gdrive API help examples
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'drive-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def main():
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('drive', 'v3', http=http)
pp = pprint.PrettyPrinter()
#grab first batch of possible infected files
results = service.files().list(pageSize=1,
fields="nextPageToken, files(id, name)").execute()
items = results.get('files', [])
next_page = results.get('nextPageToken', None)
bad_files = []
done = False
next_page = None
while True:
results = service.files().list(pageToken=next_page, pageSize=100,
fields="nextPageToken, files(id, name)").execute()
items = results.get('files', [])
if not items:
print('No files found.')
break
else:
for item in items:
#Only act on files with osiris in the name.
if 'osiris' in item['name']:
bad_files.append(item)
next_page = results.get('nextPageToken', None)
print("Found {} bad files".format(len(bad_files)))
#Download a backup of all files just in case
for bad_item in bad_files:
revisions = service.revisions().list(fileId=bad_item['id'], fields='*').execute()
assert(len(revisions['revisions']) >= 2)
dt = dateutil.parser.parse(revisions['revisions'][-1]['modifiedTime'])
        if dt.day == DAY_OF_INFECTION and dt.month == MONTH_OF_INFECTION and dt.year == YEAR_OF_INFECTION:
print("Last revision dates from virus day")
else:
print("Skipping {}, datastamp on file isn't from virus day")
continue
dt = dateutil.parser.parse(revisions['revisions'][-2]['modifiedTime'])
print("Date of second to last revision is: {}".format(dt))
request = service.revisions().get_media(fileId=bad_item['id'],
revisionId=revisions['revisions'][-2]['id'])
#Filenames are not unique in gdrive so append with file ID as well
new_filename = os.path.join('backup',
revisions['revisions'][-2]['originalFilename'] + '_' + bad_item['id'])
#If we are re-running script see if we already downloaded this file
if os.path.isfile(new_filename):
print("File {} already backed up, skipping".format(new_filename))
continue
fh = io.FileIO(new_filename, 'wb')
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
print("Download {}".format(int(status.progress() * 100)) )
count = 0
for bad_item in bad_files:
count = count + 1
#Do in batches just to be kind of safe.
if count > 50:
break
file_id = bad_item['id']
revisions = service.revisions().list(fileId=file_id, fields='*').execute()
if len(revisions['revisions']) < 2:
print("File has only 1 revision, skipping: {}".format(bad_item))
continue
file_meta = service.files().get(fileId=file_id, fields='*').execute()
dt_last = dateutil.parser.parse(revisions['revisions'][-1]['modifiedTime'])
dt_2nd_last = dateutil.parser.parse(revisions['revisions'][-2]['modifiedTime'])
if dt_last.day == DAY_OF_INFECTION and dt_last.month == MONTH_OF_INFECTION and dt_last.year == YEAR_OF_INFECTION:
print("Last revision dates from virus day")
else:
print("Skipping {}, datestamp on file isn't from virus day")
continue
orig_file_name = file_meta['originalFilename']
target_rev_name = revisions['revisions'][-2]['originalFilename']
#If the 2nd to last revision is also osiris, we can't simply revert
if 'osiris' in target_rev_name:
print("2nd to last rev filename has osiris in the name, skipping: ({})".format(target_rev_name))
#print out some debug info so we can figure out what we have multipe revisions with osiris
pp.pprint(file_meta)
print(' ')
pp.pprint(revisions)
continue
print("{}: {} revisions found".format(target_rev_name, len(revisions['revisions'])) )
#THESE ARE THE REALLY DANGEROUS STEPS, ONLY UNCOMMMENT IF YOU KNOW WHAT YOU ARE DOING!!!
rev_id_to_delete = revisions['revisions'][-1]['id']
print("service.revisions().delete(fileId={}, revisionId={}).execute()".format(file_id, rev_id_to_delete))
#del_rev = service.revisions().delete(fileId=file_id, revisionId=rev_id_to_delete).execute()
update_body = { 'name': target_rev_name }
print("service.files().update(fileId={}, body={}).execute()".format(file_id, update_body))
#update_name = service.files().update(fileId=file_id, body=update_body).execute()
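# The same three-way comparison against the infection-date constants appears
# twice inside main(). The helper below is only an illustrative sketch of that
# check (main() above does not call it); it relies solely on the datetime
# objects produced by dateutil.parser.parse.
def _is_from_infection_day(dt):
    return (dt.day == DAY_OF_INFECTION
            and dt.month == MONTH_OF_INFECTION
            and dt.year == YEAR_OF_INFECTION)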
if __name__ == '__main__':
main()
| smacfiggen/horus | revert_osiris.py | Python | unlicense | 6,871 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This script computes bounds on the privacy cost of training the
student model from noisy aggregation of labels predicted by teachers.
It should be used only after training the student (and therefore the
teachers as well). We however include the label files required to
reproduce key results from our paper (https://arxiv.org/abs/1610.05755):
the epsilon bounds for MNIST and SVHN students.
The command that computes the epsilon bound associated
with the training of the MNIST student model (100 label queries
with a (1/20)*2=0.1 epsilon bound each) is:
python analysis.py
--counts_file=mnist_250_teachers_labels.npy
--indices_file=mnist_250_teachers_100_indices_used_by_student.npy
The command that computes the epsilon bound associated
with the training of the SVHN student model (1000 label queries
with a (1/20)*2=0.1 epsilon bound each) is:
python analysis.py
--counts_file=svhn_250_teachers_labels.npy
--max_examples=1000
--delta=1e-6
"""
import os
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
from differential_privacy.multiple_teachers.input import maybe_download
# These parameters can be changed to compute bounds for different failure rates
# or different model predictions.
tf.flags.DEFINE_integer("moments",8, "Number of moments")
tf.flags.DEFINE_float("noise_eps", 0.1, "Eps value for each call to noisymax.")
tf.flags.DEFINE_float("delta", 1e-5, "Target value of delta.")
tf.flags.DEFINE_float("beta", 0.09, "Value of beta for smooth sensitivity")
tf.flags.DEFINE_string("counts_file","","Numpy matrix with raw counts")
tf.flags.DEFINE_string("indices_file","",
"File containting a numpy matrix with indices used."
"Optional. Use the first max_examples indices if this is not provided.")
tf.flags.DEFINE_integer("max_examples",1000,
"Number of examples to use. We will use the first"
" max_examples many examples from the counts_file"
" or indices_file to do the privacy cost estimate")
tf.flags.DEFINE_float("too_small", 1e-10, "Small threshold to avoid log of 0")
tf.flags.DEFINE_bool("input_is_counts", False, "False if labels, True if counts")
FLAGS = tf.flags.FLAGS
def compute_q_noisy_max(counts, noise_eps):
"""returns ~ Pr[outcome != winner].
Args:
counts: a list of scores
noise_eps: privacy parameter for noisy_max
Returns:
q: the probability that outcome is different from true winner.
"""
# For noisy max, we only get an upper bound.
# Pr[ j beats i*] \leq (2+gap(j,i*))/ 4 exp(gap(j,i*)
# proof at http://mathoverflow.net/questions/66763/
# tight-bounds-on-probability-of-sum-of-laplace-random-variables
winner = np.argmax(counts)
counts_normalized = noise_eps * (counts - counts[winner])
counts_rest = np.array(
[counts_normalized[i] for i in xrange(len(counts)) if i != winner])
q = 0.0
for c in counts_rest:
gap = -c
q += (gap + 2.0) / (4.0 * math.exp(gap))
return min(q, 1.0 - (1.0/len(counts)))
def compute_q_noisy_max_approx(counts, noise_eps):
"""returns ~ Pr[outcome != winner].
Args:
counts: a list of scores
noise_eps: privacy parameter for noisy_max
Returns:
q: the probability that outcome is different from true winner.
"""
# For noisy max, we only get an upper bound.
# Pr[ j beats i*] \leq (2+gap(j,i*))/ 4 exp(gap(j,i*)
# proof at http://mathoverflow.net/questions/66763/
# tight-bounds-on-probability-of-sum-of-laplace-random-variables
# This code uses an approximation that is faster and easier
# to get local sensitivity bound on.
winner = np.argmax(counts)
counts_normalized = noise_eps * (counts - counts[winner])
counts_rest = np.array(
[counts_normalized[i] for i in xrange(len(counts)) if i != winner])
gap = -max(counts_rest)
q = (len(counts) - 1) * (gap + 2.0) / (4.0 * math.exp(gap))
return min(q, 1.0 - (1.0/len(counts)))
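# Hypothetical usage sketch (not invoked anywhere in this script): for a vote
# histogram with a clear winner, the exact and approximate bounds on
# Pr[outcome != winner] can be compared directly. Both helpers above expect an
# array of per-class counts plus the noisy-max epsilon.
def _example_q_bounds():
  counts = np.array([48, 1, 1])  # 48 of 50 teachers agree on class 0
  q_exact = compute_q_noisy_max(counts, noise_eps=0.1)
  q_approx = compute_q_noisy_max_approx(counts, noise_eps=0.1)
  # The exact bound sums one term per losing class at its own gap, while the
  # approximation charges every losing class the largest term, so it is never
  # smaller than the exact value.
  assert q_exact <= q_approx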
def logmgf_exact(q, priv_eps, l):
"""Computes the logmgf value given q and privacy eps.
The bound used is the min of three terms. The first term is from
https://arxiv.org/pdf/1605.02065.pdf.
The second term is based on the fact that when event has probability (1-q) for
q close to zero, q can only change by exp(eps), which corresponds to a
much smaller multiplicative change in (1-q)
The third term comes directly from the privacy guarantee.
Args:
q: pr of non-optimal outcome
priv_eps: eps parameter for DP
l: moment to compute.
Returns:
Upper bound on logmgf
"""
if q < 0.5:
t_one = (1-q) * math.pow((1-q) / (1 - math.exp(priv_eps) * q), l)
t_two = q * math.exp(priv_eps * l)
t = t_one + t_two
try:
log_t = math.log(t)
except ValueError:
print("Got ValueError in math.log for values :" + str((q, priv_eps, l, t)))
log_t = priv_eps * l
else:
log_t = priv_eps * l
return min(0.5 * priv_eps * priv_eps * l * (l + 1), log_t, priv_eps * l)
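# Worked illustration of the three competing bounds (assumed example values,
# never used by the script): for q = 0.01, priv_eps = 0.2 and moment l = 8 the
# candidates are the data-dependent log(t_one + t_two) term, the generic
# 0.5 * eps^2 * l * (l + 1) term, and the pure eps * l term, and logmgf_exact
# returns their minimum.
def _example_logmgf_terms(q=0.01, priv_eps=0.2, l=8):
  t_one = (1 - q) * math.pow((1 - q) / (1 - math.exp(priv_eps) * q), l)
  t_two = q * math.exp(priv_eps * l)
  data_dependent = math.log(t_one + t_two)
  generic_moment = 0.5 * priv_eps * priv_eps * l * (l + 1)
  pure_dp = priv_eps * l
  assert logmgf_exact(q, priv_eps, l) == min(generic_moment, data_dependent, pure_dp)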
def logmgf_from_counts(counts, noise_eps, l):
"""
ReportNoisyMax mechanism with noise_eps with 2*noise_eps-DP
in our setting where one count can go up by one and another
can go down by 1.
"""
q = compute_q_noisy_max(counts, noise_eps)
return logmgf_exact(q, 2.0 * noise_eps, l)
def sens_at_k(counts, noise_eps, l, k):
"""Return sensitivity at distane k.
Args:
counts: an array of scores
noise_eps: noise parameter used
l: moment whose sensitivity is being computed
k: distance
Returns:
sensitivity: at distance k
"""
counts_sorted = sorted(counts, reverse=True)
if 0.5 * noise_eps * l > 1:
print("l too large to compute sensitivity")
return 0
# Now we can assume that at k, gap remains positive
# or we have reached the point where logmgf_exact is
# determined by the first term and ind of q.
if counts[0] < counts[1] + k:
return 0
counts_sorted[0] -= k
counts_sorted[1] += k
val = logmgf_from_counts(counts_sorted, noise_eps, l)
counts_sorted[0] -= 1
counts_sorted[1] += 1
val_changed = logmgf_from_counts(counts_sorted, noise_eps, l)
return val_changed - val
def smoothed_sens(counts, noise_eps, l, beta):
"""Compute beta-smooth sensitivity.
Args:
    counts: array of scores
noise_eps: noise parameter
l: moment of interest
beta: smoothness parameter
Returns:
smooth_sensitivity: a beta smooth upper bound
"""
k = 0
smoothed_sensitivity = sens_at_k(counts, noise_eps, l, k)
while k < max(counts):
k += 1
sensitivity_at_k = sens_at_k(counts, noise_eps, l, k)
smoothed_sensitivity = max(
smoothed_sensitivity,
math.exp(-beta * k) * sensitivity_at_k)
if sensitivity_at_k == 0.0:
break
return smoothed_sensitivity
def main(unused_argv):
##################################################################
# If we are reproducing results from paper https://arxiv.org/abs/1610.05755,
# download the required binaries with label information.
##################################################################
# Binaries for MNIST results
paper_binaries_mnist = \
["https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_labels.npy?raw=true",
"https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_100_indices_used_by_student.npy?raw=true"]
if FLAGS.counts_file == "mnist_250_teachers_labels.npy" \
or FLAGS.indices_file == "mnist_250_teachers_100_indices_used_by_student.npy":
maybe_download(paper_binaries_mnist, os.getcwd())
# Binaries for SVHN results
paper_binaries_svhn = ["https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/svhn_250_teachers_labels.npy?raw=true"]
if FLAGS.counts_file == "svhn_250_teachers_labels.npy":
maybe_download(paper_binaries_svhn, os.getcwd())
input_mat = np.load(FLAGS.counts_file)
if FLAGS.input_is_counts:
counts_mat = input_mat
else:
# In this case, the input is the raw predictions. Transform
num_teachers, n = input_mat.shape
counts_mat = np.zeros((n, 10)).astype(np.int32)
for i in range(n):
for j in range(num_teachers):
counts_mat[i, int(input_mat[j, i])] += 1
n = counts_mat.shape[0]
num_examples = min(n, FLAGS.max_examples)
if not FLAGS.indices_file:
indices = np.array(range(num_examples))
else:
index_list = np.load(FLAGS.indices_file)
indices = index_list[:num_examples]
l_list = 1.0 + np.array(xrange(FLAGS.moments))
beta = FLAGS.beta
total_log_mgf_nm = np.array([0.0 for _ in l_list])
total_ss_nm = np.array([0.0 for _ in l_list])
noise_eps = FLAGS.noise_eps
for i in indices:
total_log_mgf_nm += np.array(
[logmgf_from_counts(counts_mat[i], noise_eps, l)
for l in l_list])
total_ss_nm += np.array(
[smoothed_sens(counts_mat[i], noise_eps, l, beta)
for l in l_list])
delta = FLAGS.delta
# We want delta = exp(alpha - eps l).
# Solving gives eps = (alpha - ln (delta))/l
eps_list_nm = (total_log_mgf_nm - math.log(delta)) / l_list
print("Epsilons (Noisy Max): " + str(eps_list_nm))
print("Smoothed sensitivities (Noisy Max): " + str(total_ss_nm / l_list))
# If beta < eps / 2 ln (1/delta), then adding noise Lap(1) * 2 SS/eps
# is eps,delta DP
# Also if beta < eps / 2(gamma +1), then adding noise 2(gamma+1) SS eta / eps
# where eta has density proportional to 1 / (1+|z|^gamma) is eps-DP
  # Both from Corollary 2.4 in
# http://www.cse.psu.edu/~ads22/pubs/NRS07/NRS07-full-draft-v1.pdf
# Print the first one's scale
ss_eps = 2.0 * beta * math.log(1/delta)
ss_scale = 2.0 / ss_eps
print("To get an " + str(ss_eps) + "-DP estimate of epsilon, ")
print("..add noise ~ " + str(ss_scale))
print("... times " + str(total_ss_nm / l_list))
print("Epsilon = " + str(min(eps_list_nm)) + ".")
if min(eps_list_nm) == eps_list_nm[-1]:
print("Warning: May not have used enough values of l")
# Data independent bound, as mechanism is
# 2*noise_eps DP.
data_ind_log_mgf = np.array([0.0 for _ in l_list])
data_ind_log_mgf += num_examples * np.array(
[logmgf_exact(1.0, 2.0 * noise_eps, l) for l in l_list])
data_ind_eps_list = (data_ind_log_mgf - math.log(delta)) / l_list
print("Data independent bound = " + str(min(data_ind_eps_list)) + ".")
return
if __name__ == "__main__":
tf.app.run()
| cshallue/models | research/differential_privacy/multiple_teachers/analysis.py | Python | apache-2.0 | 10,988 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# <yamahata at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import log as logging
from nova import utils
from nova.network import linux_net
from nova.openstack.common import cfg
from ryu.app.client import OFPClient
LOG = logging.getLogger(__name__)
ryu_linux_net_opt = cfg.StrOpt('linuxnet_ovs_ryu_api_host',
default='127.0.0.1:8080',
help='Openflow Ryu REST API host:port')
FLAGS = flags.FLAGS
FLAGS.register_opt(ryu_linux_net_opt)
def _get_datapath_id(bridge_name):
out, _err = utils.execute('ovs-vsctl', 'get', 'Bridge',
bridge_name, 'datapath_id', run_as_root=True)
return out.strip().strip('"')
def _get_port_no(dev):
out, _err = utils.execute('ovs-vsctl', 'get', 'Interface', dev,
'ofport', run_as_root=True)
return int(out.strip())
class LinuxOVSRyuInterfaceDriver(linux_net.LinuxOVSInterfaceDriver):
def __init__(self):
super(LinuxOVSRyuInterfaceDriver, self).__init__()
LOG.debug('ryu rest host %s', FLAGS.linuxnet_ovs_ryu_api_host)
self.ryu_client = OFPClient(FLAGS.linuxnet_ovs_ryu_api_host)
self.datapath_id = _get_datapath_id(
FLAGS.linuxnet_ovs_integration_bridge)
if linux_net.binary_name == 'nova-network':
for tables in [linux_net.iptables_manager.ipv4,
linux_net.iptables_manager.ipv6]:
tables['filter'].add_rule('FORWARD',
'--in-interface gw-+ --out-interface gw-+ -j DROP')
linux_net.iptables_manager.apply()
def plug(self, network, mac_address, gateway=True):
LOG.debug("network %s mac_adress %s gateway %s",
network, mac_address, gateway)
ret = super(LinuxOVSRyuInterfaceDriver, self).plug(
network, mac_address, gateway)
port_no = _get_port_no(self.get_dev(network))
self.ryu_client.create_port(network['uuid'], self.datapath_id, port_no)
return ret
| sileht/deb-openstack-quantum | quantum/plugins/ryu/nova/linux_net.py | Python | apache-2.0 | 2,786 |
#!/usr/bin/env python2
# find-kam-node.py: python2 example of loading kam, resolving kam node, and
# printing out BEL terms
#
# usage: find-kam-node.py <kam name> <source_bel_term>
from random import choice
from suds import *
from ws import *
import sys
import time
def load_kam(client, kam_name):
'''
Loads a KAM by name. This function will sleep until the KAM's
loadStatus is 'COMPLETE'.
'''
def call():
'''
Load the KAM and return result. Exit with error if 'loadStatus'
is FAILED.
'''
kam = client.create('Kam')
kam.name = kam_name
result = client.service.LoadKam(kam)
status = result['loadStatus']
if status == 'FAILED':
print 'FAILED!'
print sys.exc_info()[1]
exit_failure()
return result
# load kam and wait for completion
result = call()
while result['loadStatus'] != 'COMPLETE':
time.sleep(0.5)
result = call()
return result['handle']
if __name__ == '__main__':
from sys import argv, exit, stderr
if len(argv) != 3:
msg = 'usage: find-kam-node.py <kam name> <source_bel_term>\n'
stderr.write(msg)
exit(1)
# unpack command-line arguments; except the first script name argument
(kam_name, source_term) = argv[1:]
client = WS('http://localhost:8080/openbel-ws/belframework.wsdl')
handle = load_kam(client, kam_name)
print "loaded kam '%s', handle '%s'" % (kam_name, handle.handle)
# create nodes using BEL term labels from command-line
node = client.create("Node")
node.label = source_term
# resolve node
result = client.service.ResolveNodes(handle, [node], None)
if len(result) == 1 and result[0]:
the_node = result[0]
print "found node, id: %s" % (the_node.id)
terms = client.service.GetSupportingTerms(the_node, None)
for t in terms:
print t
else:
print "edge not found"
exit_success()
| OpenBEL/openbel-framework-examples | web-api/python/find-kam-node.py | Python | apache-2.0 | 2,020 |
"""
Weather component that handles meteorological data for your location.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/weather/
"""
import asyncio
import logging
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.temperature import display_temp as show_temp
from homeassistant.const import PRECISION_WHOLE, PRECISION_TENTHS, TEMP_CELSIUS
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = []
DOMAIN = 'weather'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
ATTR_CONDITION_CLASS = 'condition_class'
ATTR_FORECAST = 'forecast'
ATTR_FORECAST_CONDITION = 'condition'
ATTR_FORECAST_PRECIPITATION = 'precipitation'
ATTR_FORECAST_TEMP = 'temperature'
ATTR_FORECAST_TEMP_LOW = 'templow'
ATTR_FORECAST_TIME = 'datetime'
ATTR_WEATHER_ATTRIBUTION = 'attribution'
ATTR_WEATHER_HUMIDITY = 'humidity'
ATTR_WEATHER_OZONE = 'ozone'
ATTR_WEATHER_PRESSURE = 'pressure'
ATTR_WEATHER_TEMPERATURE = 'temperature'
ATTR_WEATHER_VISIBILITY = 'visibility'
ATTR_WEATHER_WIND_BEARING = 'wind_bearing'
ATTR_WEATHER_WIND_SPEED = 'wind_speed'
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the weather component."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_setup(config)
return True
class WeatherEntity(Entity):
"""ABC for weather data."""
@property
def temperature(self):
"""Return the platform temperature."""
raise NotImplementedError()
@property
def temperature_unit(self):
"""Return the unit of measurement."""
raise NotImplementedError()
@property
def pressure(self):
"""Return the pressure."""
return None
@property
def humidity(self):
"""Return the humidity."""
raise NotImplementedError()
@property
def wind_speed(self):
"""Return the wind speed."""
return None
@property
def wind_bearing(self):
"""Return the wind bearing."""
return None
@property
def ozone(self):
"""Return the ozone level."""
return None
@property
def attribution(self):
"""Return the attribution."""
return None
@property
def visibility(self):
"""Return the visibility."""
return None
@property
def forecast(self):
"""Return the forecast."""
return None
@property
def precision(self):
"""Return the forecast."""
return PRECISION_TENTHS if self.temperature_unit == TEMP_CELSIUS \
else PRECISION_WHOLE
@property
def state_attributes(self):
"""Return the state attributes."""
data = {
ATTR_WEATHER_TEMPERATURE: show_temp(
self.hass, self.temperature, self.temperature_unit,
self.precision),
}
humidity = self.humidity
if humidity is not None:
data[ATTR_WEATHER_HUMIDITY] = round(humidity)
ozone = self.ozone
if ozone is not None:
data[ATTR_WEATHER_OZONE] = ozone
pressure = self.pressure
if pressure is not None:
data[ATTR_WEATHER_PRESSURE] = pressure
wind_bearing = self.wind_bearing
if wind_bearing is not None:
data[ATTR_WEATHER_WIND_BEARING] = wind_bearing
wind_speed = self.wind_speed
if wind_speed is not None:
data[ATTR_WEATHER_WIND_SPEED] = wind_speed
visibility = self.visibility
if visibility is not None:
data[ATTR_WEATHER_VISIBILITY] = visibility
attribution = self.attribution
if attribution is not None:
data[ATTR_WEATHER_ATTRIBUTION] = attribution
if self.forecast is not None:
forecast = []
for forecast_entry in self.forecast:
forecast_entry = dict(forecast_entry)
forecast_entry[ATTR_FORECAST_TEMP] = show_temp(
self.hass, forecast_entry[ATTR_FORECAST_TEMP],
self.temperature_unit, self.precision)
if ATTR_FORECAST_TEMP_LOW in forecast_entry:
forecast_entry[ATTR_FORECAST_TEMP_LOW] = show_temp(
self.hass, forecast_entry[ATTR_FORECAST_TEMP_LOW],
self.temperature_unit, self.precision)
forecast.append(forecast_entry)
data[ATTR_FORECAST] = forecast
return data
@property
def state(self):
"""Return the current state."""
return self.condition
@property
def condition(self):
"""Return the current condition."""
raise NotImplementedError()
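# Minimal illustration of how a platform could subclass WeatherEntity (the
# class name and the returned values are made up for this sketch; real
# platforms provide their own modules). Only temperature, temperature_unit,
# humidity and condition must be overridden; the remaining properties fall
# back to the None defaults above.
class _ExampleWeather(WeatherEntity):
    """Static demo weather entity."""
    @property
    def temperature(self):
        """Return the platform temperature."""
        return 21.5
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS
    @property
    def humidity(self):
        """Return the humidity."""
        return 54
    @property
    def condition(self):
        """Return the current condition."""
        return 'sunny'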
| persandstrom/home-assistant | homeassistant/components/weather/__init__.py | Python | apache-2.0 | 4,851 |
import argparse
import parsl
from parsl.app.app import python_app
from parsl.tests.configs.local_threads import config
@python_app(cache=True)
def random_uuid(x, cache=True):
import uuid
return str(uuid.uuid4())
def test_python_memoization(n=2):
"""Testing python memoization disable
"""
x = random_uuid(0)
print(x.result())
for i in range(0, n):
foo = random_uuid(0)
print(foo.result())
assert foo.result() == x.result(), "Memoized results were not used"
if __name__ == '__main__':
parsl.clear()
parsl.load(config)
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="10",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
if args.debug:
parsl.set_stream_logger()
x = test_python_memoization(n=4)
| Parsl/parsl | parsl/tests/test_python_apps/test_memoize_1.py | Python | apache-2.0 | 976 |
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import logging
import os
import sys
from typing import List, Union
import numpy as np
from ludwig.api import LudwigModel
from ludwig.backend import ALL_BACKENDS, LOCAL, Backend
from ludwig.constants import FULL, TEST, TRAINING, VALIDATION
from ludwig.contrib import contrib_command
from ludwig.globals import LUDWIG_VERSION
from ludwig.utils.print_utils import (logging_level_registry, print_boxed,
print_ludwig)
from ludwig.utils.strings_utils import make_safe_filename
logger = logging.getLogger(__name__)
def collect_activations(
model_path: str,
layers: List[str],
dataset: str,
data_format: str = None,
split: str = FULL,
batch_size: int = 128,
output_directory: str = 'results',
gpus: List[str] = None,
        gpu_memory_limit: int = None,
allow_parallel_threads: bool = True,
backend: Union[Backend, str] = None,
debug: bool = False,
**kwargs
) -> List[str]:
"""
Uses the pretrained model to collect the tensors corresponding to a
datapoint in the dataset. Saves the tensors to the experiment directory
# Inputs
:param model_path: (str) filepath to pre-trained model.
:param layers: (List[str]) list of strings for layer names in the model
to collect activations.
:param dataset: (str) source
containing the data to make predictions.
:param data_format: (str, default: `None`) format to interpret data
sources. Will be inferred automatically if not specified. Valid
formats are `'auto'`, `'csv'`, `'excel'`, `'feather'`,
`'fwf'`, `'hdf5'` (cache file produced during previous training),
`'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
`'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
`'stata'`, `'tsv'`.
:param split: (str, default: `full`) split on which
to perform predictions. Valid values are `'training'`, `'validation'`,
`'test'` and `'full'`.
:param batch_size: (int, default `128`) size of batches for processing.
:param output_directory: (str, default: `'results'`) the directory that
will contain the training statistics, TensorBoard logs, the saved
model and the training progress files.
:param gpus: (list, default: `None`) list of GPUs that are available
for training.
:param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
allocate per GPU device.
:param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
to use multithreading parallelism to improve performance at
the cost of determinism.
:param backend: (Union[Backend, str]) `Backend` or string name
of backend to use to execute preprocessing / training steps.
:param debug: (bool, default: `False) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
# Return
:return: (List[str]) list of filepath to `*.npy` files containing
the activations.
"""
    logger.info('Dataset path: {}'.format(dataset))
logger.info('Model path: {}'.format(model_path))
logger.info('Output path: {}'.format(output_directory))
logger.info('\n')
model = LudwigModel.load(
model_path,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
backend=backend
)
# collect activations
print_boxed('COLLECT ACTIVATIONS')
collected_tensors = model.collect_activations(
layers,
dataset,
data_format=data_format,
split=split,
batch_size=batch_size,
debug=debug
)
# saving
os.makedirs(output_directory, exist_ok=True)
saved_filenames = save_tensors(collected_tensors, output_directory)
logger.info('Saved to: {0}'.format(output_directory))
return saved_filenames
def collect_weights(
model_path: str,
tensors: List[str],
output_directory: str = 'results',
debug: bool = False,
**kwargs
) -> List[str]:
"""
Loads a pretrained model and collects weights.
# Inputs
:param model_path: (str) filepath to pre-trained model.
:param tensors: (list, default: `None`) List of tensor names to collect
weights
:param output_directory: (str, default: `'results'`) the directory where
collected weights will be stored.
:param debug: (bool, default: `False) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
# Return
:return: (List[str]) list of filepath to `*.npy` files containing
the weights.
"""
logger.info('Model path: {}'.format(model_path))
logger.info('Output path: {}'.format(output_directory))
logger.info('\n')
model = LudwigModel.load(model_path)
# collect weights
print_boxed('COLLECT WEIGHTS')
collected_tensors = model.collect_weights(tensors)
# saving
os.makedirs(output_directory, exist_ok=True)
saved_filenames = save_tensors(collected_tensors, output_directory)
logger.info('Saved to: {0}'.format(output_directory))
return saved_filenames
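# Hypothetical usage of collect_weights (the model directory and tensor name
# below are placeholders; real tensor names can be listed first with
# print_model_summary, defined later in this module). This helper is an
# uncalled illustration only.
def _example_collect_weights_usage():
    return collect_weights(
        model_path='results/experiment_run/model',
        tensors=['utterance/fc_0/weights'],
        output_directory='collected_weights',
    )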
def save_tensors(collected_tensors, output_directory):
filenames = []
for tensor_name, tensor_value in collected_tensors:
np_filename = os.path.join(
output_directory,
make_safe_filename(tensor_name) + '.npy'
)
np.save(np_filename, tensor_value.numpy())
filenames.append(np_filename)
return filenames
def print_model_summary(
model_path: str,
**kwargs
) -> None:
"""
Loads a pretrained model and prints names of weights and layers activations.
# Inputs
:param model_path: (str) filepath to pre-trained model.
# Return
:return: (`None`)
"""
model = LudwigModel.load(model_path)
collected_tensors = model.collect_weights()
names = [name for name, w in collected_tensors]
keras_model = model.model.get_connected_model(training=False)
keras_model.summary()
print('\nLayers:\n')
for layer in keras_model.layers:
print(layer.name)
print('\nWeights:\n')
for name in names:
print(name)
def cli_collect_activations(sys_argv):
"""Command Line Interface to communicate with the collection of tensors and
there are several options that can specified when calling this function:
--data_csv: Filepath for the input csv
--data_hdf5: Filepath for the input hdf5 file, if there is a csv file, this
is not read
--d: Refers to the dataset type of the file being read, by default is
*generic*
--s: Refers to the split of the data, can be one of: train, test,
validation, full
--m: Input model that is necessary to collect to the tensors, this is a
required *option*
--t: Tensors to collect
--od: Output directory of the model, defaults to results
--bs: Batch size
--g: Number of gpus that are to be used
--gf: Fraction of each GPUs memory to use.
--dbg: Debug if the model is to be started with python debugger
--v: Verbose: Defines the logging level that the user will be exposed to
"""
parser = argparse.ArgumentParser(
        description='This script loads a pretrained model and uses it to collect '
'tensors for each datapoint in the dataset.',
prog='ludwig collect_activations',
usage='%(prog)s [options]')
# ---------------
# Data parameters
# ---------------
parser.add_argument(
'--dataset',
help='input data file path',
required=True
)
parser.add_argument(
'--data_format',
help='format of the input data',
default='auto',
choices=['auto', 'csv', 'excel', 'feather', 'fwf', 'hdf5',
                 'html', 'tables', 'json', 'jsonl', 'parquet', 'pickle', 'sas',
'spss', 'stata', 'tsv']
)
parser.add_argument(
'-s',
'--split',
default=FULL,
choices=[TRAINING, VALIDATION, TEST, FULL],
help='the split to obtain the model activations from'
)
# ----------------
# Model parameters
# ----------------
parser.add_argument(
'-m',
'--model_path',
help='model to load',
required=True
)
parser.add_argument(
'-lyr',
'--layers',
help='tensors to collect',
nargs='+',
required=True
)
# -------------------------
# Output results parameters
# -------------------------
parser.add_argument(
'-od',
'--output_directory',
type=str,
default='results',
help='directory that contains the results'
)
# ------------------
# Generic parameters
# ------------------
parser.add_argument(
'-bs',
'--batch_size',
type=int,
default=128,
help='size of batches'
)
# ------------------
# Runtime parameters
# ------------------
parser.add_argument(
'-g',
'--gpus',
type=int,
default=0,
help='list of gpu to use'
)
parser.add_argument(
'-gml',
'--gpu_memory_limit',
type=int,
default=None,
help='maximum memory in MB to allocate per GPU device'
)
parser.add_argument(
'-dpt',
'--disable_parallel_threads',
action='store_false',
dest='allow_parallel_threads',
help='disable TensorFlow from using multithreading for reproducibility'
)
parser.add_argument(
"-b",
"--backend",
help='specifies backend to use for parallel / distributed execution, '
'defaults to local execution or Horovod if called using horovodrun',
choices=ALL_BACKENDS,
)
parser.add_argument(
'-dbg',
'--debug',
action='store_true',
default=False,
help='enables debugging mode'
)
parser.add_argument(
'-l',
'--logging_level',
default='info',
help='the level of logging to use',
choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
)
args = parser.parse_args(sys_argv)
args.logging_level = logging_level_registry[args.logging_level]
logging.getLogger('ludwig').setLevel(
args.logging_level
)
global logger
logger = logging.getLogger('ludwig.collect')
print_ludwig('Collect Activations', LUDWIG_VERSION)
collect_activations(**vars(args))
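# Illustrative command line (hypothetical file paths and layer name), assuming the
# parser above is exposed through the `ludwig collect_activations` entry point:
#
#   ludwig collect_activations --dataset data.csv -m results/model \
#       -lyr encoder_output -od activations_out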
def cli_collect_weights(sys_argv):
"""Command Line Interface to collecting the weights for the model
--m: Input model that is necessary to collect to the tensors, this is a
required *option*
--t: Tensors to collect
--od: Output directory of the model, defaults to results
--dbg: Debug if the model is to be started with python debugger
--v: Verbose: Defines the logging level that the user will be exposed to
"""
parser = argparse.ArgumentParser(
description='This script loads a pretrained model '
                    'and uses it to collect weights.',
prog='ludwig collect_weights',
usage='%(prog)s [options]'
)
# ----------------
# Model parameters
# ----------------
parser.add_argument(
'-m',
'--model_path',
help='model to load',
required=True
)
parser.add_argument(
'-t',
'--tensors',
help='tensors to collect',
nargs='+',
required=True
)
# -------------------------
# Output results parameters
# -------------------------
parser.add_argument(
'-od',
'--output_directory',
type=str,
default='results',
help='directory that contains the results'
)
# ------------------
# Runtime parameters
# ------------------
parser.add_argument(
'-dbg',
'--debug',
action='store_true',
default=False,
help='enables debugging mode'
)
parser.add_argument(
'-l',
'--logging_level',
default='info',
help='the level of logging to use',
choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
)
args = parser.parse_args(sys_argv)
args.logging_level = logging_level_registry[args.logging_level]
logging.getLogger('ludwig').setLevel(
args.logging_level
)
global logger
logger = logging.getLogger('ludwig.collect')
print_ludwig('Collect Weights', LUDWIG_VERSION)
collect_weights(**vars(args))
def cli_collect_summary(sys_argv):
"""Command Line Interface to collecting a summary of the model layers and weights.
--m: Input model that is necessary to collect to the tensors, this is a
required *option*
--v: Verbose: Defines the logging level that the user will be exposed to
"""
parser = argparse.ArgumentParser(
description='This script loads a pretrained model '
                    'and prints names of weights and layer activations '
'to use with other collect commands',
prog='ludwig collect_summary',
usage='%(prog)s [options]'
)
# ----------------
# Model parameters
# ----------------
parser.add_argument(
'-m',
'--model_path',
help='model to load',
required=True
)
# ------------------
# Runtime parameters
# ------------------
parser.add_argument(
'-l',
'--logging_level',
default='info',
help='the level of logging to use',
choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
)
args = parser.parse_args(sys_argv)
args.logging_level = logging_level_registry[args.logging_level]
logging.getLogger('ludwig').setLevel(
args.logging_level
)
global logger
logger = logging.getLogger('ludwig.collect')
print_ludwig('Collect Summary', LUDWIG_VERSION)
print_model_summary(**vars(args))
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1] == 'activations':
contrib_command("collect_activations", *sys.argv)
cli_collect_activations(sys.argv[2:])
elif sys.argv[1] == 'weights':
contrib_command("collect_weights", *sys.argv)
cli_collect_weights(sys.argv[2:])
elif sys.argv[1] == 'names':
contrib_command("collect_summary", *sys.argv)
cli_collect_summary(sys.argv[2:])
else:
print('Unrecognized command')
else:
print('Unrecognized command')
| uber/ludwig | ludwig/collect.py | Python | apache-2.0 | 15,469 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatter for Android contacts2.db database events."""
from plaso.lib import eventdata
class AndroidCallFormatter(eventdata.ConditionalEventFormatter):
"""Formatter for Android call history events."""
DATA_TYPE = 'android:event:call'
FORMAT_STRING_PIECES = [
u'{call_type}',
u'Number: {number}',
u'Name: {name}',
u'Duration: {duration} seconds']
FORMAT_STRING_SHORT_PIECES = [u'{call_type} Call']
SOURCE_LONG = 'Android Call History'
SOURCE_SHORT = 'LOG'
| iwm911/plaso | plaso/formatters/android_calls.py | Python | apache-2.0 | 1,199 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
__all__ = [
'assert_same_float_dtype',
'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor',
'is_tensor',
'reduce_sum_n',
'with_shape',
'with_same_shape']
def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type
def assert_same_float_dtype(tensors=None, dtype=None):
"""Validate and return float type based on `tensors` and `dtype`.
For ops such as matrix multiplication, inputs and weights must be of the
same float type. This function validates that all `tensors` are the same type,
validates that type is `dtype` (if supplied), and returns the type. Type must
be `dtypes.float32` or `dtypes.float64`. If neither `tensors` nor
`dtype` is supplied, default to `dtypes.float32`.
Args:
tensors: Tensors of input values. Can include `None` elements, which will be
ignored.
dtype: Expected type.
Returns:
Validated type.
Raises:
ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
float.
"""
if tensors:
dtype = _assert_same_base_type(tensors, dtype)
if not dtype:
dtype = dtypes.float32
elif not dtype.is_floating:
raise ValueError('Expected float, got %s.' % dtype)
return dtype
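# Illustrative usage sketch (assumed call sites; `predictions` and `labels` are
# hypothetical float tensors a caller already has in scope):
#
#   dtype = assert_same_float_dtype([predictions, labels])  # validated common type
#   dtype = assert_same_float_dtype()                        # defaults to float32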
def assert_scalar_int(tensor):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: Tensor to test.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of type `tf.int32` or `tf.int64`.
"""
data_type = tensor.dtype
if data_type.base_dtype not in [dtypes.int32, dtypes.int64]:
raise ValueError('Unexpected type %s for %s.' % (data_type, tensor.name))
shape = tensor.get_shape()
if shape.ndims != 0:
raise ValueError('Unexpected shape %s for %s.' % (shape, tensor.name))
return tensor
def reduce_sum_n(tensors, name=None):
"""Reduce tensors to a scalar sum.
This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
adds them via `tf.add_n`.
Args:
tensors: List of tensors, all of the same numeric type.
name: Tensor name, and scope for all other ops.
Returns:
    Total sum tensor, summed over all `tensors`.
  Raises:
    ValueError: if `tensors` is missing or empty.
"""
if not tensors:
raise ValueError('No tensors provided.')
tensors = [math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
if len(tensors) == 1:
return tensors[0]
with ops.name_scope(name, 'reduce_sum_n', tensors) as scope:
return math_ops.add_n(tensors, name=scope)
def _all_equal(tensor0, tensor1):
with ops.name_scope('all_equal', values=[tensor0, tensor1]) as scope:
return math_ops.reduce_all(
math_ops.equal(tensor0, tensor1, name='equal'), name=scope)
def _is_rank(expected_rank, actual_tensor):
"""Returns whether actual_tensor's rank is expected_rank.
Args:
expected_rank: Integer defining the expected rank, or tensor of same.
actual_tensor: Tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
expected = ops.convert_to_tensor(expected_rank, name='expected')
actual = array_ops.rank(actual_tensor, name='actual')
return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
New tensor.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _all_equal(
ops.convert_to_tensor(expected_shape, name='expected'),
actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
"""Asserts actual_tensor's shape is expected_shape.
Args:
expected_shape: List of integers defining the expected shape, or tensor of
same.
actual_tensor: Tensor to test.
Returns:
New assert tensor.
"""
with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
actual_shape = array_ops.shape(actual_tensor, name='actual')
is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
return control_flow_ops.Assert(
is_shape, [
'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
expected_shape,
actual_shape
], name=scope)
def with_same_shape(expected_tensor, tensor):
"""Assert tensors are the same shape, from the same graph.
Args:
expected_tensor: Tensor with expected shape.
tensor: Tensor of actual values.
Returns:
    `tensor`, possibly with an assert op added to validate its shape against
    `expected_tensor`.
"""
with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
tensor_shape = expected_tensor.get_shape()
expected_shape = (
tensor_shape.as_list() if tensor_shape.is_fully_defined()
else array_ops.shape(expected_tensor, name='expected_shape'))
return with_shape(expected_shape, tensor)
def is_tensor(x):
"""Check for tensor types.
Check whether an object is a tensor. Equivalent to
  `isinstance(x, (tf.Tensor, tf.SparseTensor, tf.Variable))`.
Args:
    x: A python object to check.
Returns:
`True` if `x` is a tensor, `False` if not.
"""
tensor_types = (ops.Tensor, ops.SparseTensor, variables.Variable)
return isinstance(x, tensor_types)
def with_shape(expected_shape, tensor):
"""Asserts tensor has expected shape.
  If tensor shape and expected_shape are fully defined, assert they match.
Otherwise, add assert op that will validate the shape when tensor is
evaluated, and set shape on tensor.
Args:
expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
of same.
tensor: Tensor whose shape we're validating.
Returns:
tensor, perhaps with a dependent assert operation.
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, ops.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
if is_tensor(expected_shape):
if expected_shape.dtype.base_dtype != dtypes.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
if isinstance(expected_shape, (list, tuple)):
if not expected_shape:
expected_shape = np.asarray([], dtype=np.int32)
else:
np_expected_shape = np.asarray(expected_shape)
expected_shape = (
np.asarray(expected_shape, dtype=np.int32)
if np_expected_shape.dtype == np.int64 else np_expected_shape)
if isinstance(expected_shape, np.ndarray):
if expected_shape.ndim > 1:
raise ValueError(
'Invalid rank %s for shape %s expected of tensor %s.' % (
expected_shape.ndim, expected_shape, tensor.name))
if expected_shape.dtype != np.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
actual_shape = tensor.get_shape()
if not actual_shape.is_fully_defined() or is_tensor(expected_shape):
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
if not is_tensor(expected_shape) and (len(expected_shape) < 1):
# TODO(irving): Remove scalar special case
return array_ops.reshape(tensor, [])
with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
result = array_ops.identity(tensor)
if not is_tensor(expected_shape):
result.set_shape(expected_shape)
return result
if (not is_tensor(expected_shape) and
not actual_shape.is_compatible_with(expected_shape)):
if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
# TODO(irving): Remove scalar special case.
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
return array_ops.reshape(tensor, [])
raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
tensor.name, expected_shape, actual_shape))
return tensor
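# Illustrative sketch (assumed usage, not exercised in this module): the static
# branch returns the tensor untouched, while the dynamic branch attaches an
# assert op and sets the static shape. The shapes below are arbitrary examples.
def _with_shape_example():
  static = with_shape([2, 3], array_ops.zeros([2, 3]))     # statically verified
  dynamic_input = array_ops.placeholder(dtypes.float32)    # shape unknown
  dynamic = with_shape([2, 3], dynamic_input)              # runtime assert added
  return static, dynamic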
def convert_to_tensor_or_sparse_tensor(
value, dtype=None, name=None, as_ref=False):
"""Converts value to a `SparseTensor` or `Tensor`.
Args:
value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
as_ref: True if we want the result as a ref tensor. Only used if a new
`Tensor` is created.
Returns:
A `SparseTensor` or `Tensor` based on `value`.
Raises:
RuntimeError: If result type is incompatible with `dtype`.
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, ops.SparseTensorValue):
value = ops.SparseTensor.from_value(value)
if isinstance(value, ops.SparseTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise RuntimeError(
'Sparse dtype: requested = %s, actual = %s' % (
dtype.name, value.dtype.name))
return value
return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
| naturali/tensorflow | tensorflow/contrib/framework/python/framework/tensor_util.py | Python | apache-2.0 | 11,844 |
import os
import unittest
import synapse
import synapse.lib.datfile as s_datfile
from synapse.tests.common import *
syndir = os.path.dirname(synapse.__file__)
class DatFileTest(SynTest):
def test_datfile_basic(self):
with s_datfile.openDatFile('synapse.tests/test.dat') as fd:
self.nn(fd)
self.eq(fd.read(), b'woot\n')
| vivisect/synapse | synapse/tests/test_lib_datfile.py | Python | apache-2.0 | 360 |
from django.test import TestCase
from django.core.urlresolvers import reverse
from working_waterfronts.working_waterfronts_api.models import Video
from django.contrib.auth.models import User
class EditVideoTestCase(TestCase):
"""
Test that the Edit Video page works as expected.
Things tested:
URLs reverse correctly
The outputted page has the correct form fields
POSTing "correct" data will result in the update of the video
object with the specified ID
"""
fixtures = ['test_fixtures']
def setUp(self):
user = User.objects.create_user(
'temporary', '[email protected]', 'temporary')
user.save()
response = self.client.login(
username='temporary', password='temporary')
self.assertEqual(response, True)
def test_not_logged_in(self):
self.client.logout()
response = self.client.get(
reverse('edit-video', kwargs={'id': '1'}))
self.assertRedirects(response, '/login?next=/entry/videos/1')
def test_url_endpoint(self):
url = reverse('edit-video', kwargs={'id': '1'})
self.assertEqual(url, '/entry/videos/1')
def test_successful_video_update(self):
"""
POST a proper "update video" command to the server, and see if
the update appears in the database
"""
        # Data that we'll post to the server to update the video
new_video = {
'caption': "A thrilling display of utmost might",
'name': "You won't believe number 3!",
'video': 'http://www.youtube.com/watch?v=dQw4w9WgXcQ'}
self.client.post(
reverse('edit-video', kwargs={'id': '1'}),
new_video)
video = Video.objects.get(id=1)
for field in new_video:
self.assertEqual(
getattr(video, field), new_video[field])
def test_form_fields(self):
"""
Tests to see if the form contains all of the right fields
"""
response = self.client.get(
reverse('edit-video', kwargs={'id': '1'}))
fields = {
'name': 'A Starship',
'caption': "Traveling at the speed of light!",
'video': 'http://www.youtube.com/watch?v=efgDdSWDg0g'
}
form = response.context['video_form']
for field in fields:
self.assertEqual(fields[field], form[field].value())
def test_delete_video(self):
"""
Tests that DELETing entry/videos/<id> deletes the item
"""
response = self.client.delete(
reverse('edit-video', kwargs={'id': '2'}))
self.assertEqual(response.status_code, 200)
with self.assertRaises(Video.DoesNotExist):
Video.objects.get(id=2)
response = self.client.delete(
reverse('edit-video', kwargs={'id': '2'}))
self.assertEqual(response.status_code, 404)
| osu-cass/working-waterfronts-api | working_waterfronts/working_waterfronts_api/tests/views/entry/test_edit_video.py | Python | apache-2.0 | 2,965 |
from textwrap import dedent
import pytest
import salt.modules.pdbedit as pdbedit
from tests.support.mock import MagicMock, patch
@pytest.fixture(autouse=True)
def setup_loader(request):
setup_loader_modules = {pdbedit: {}}
with pytest.helpers.loader_mock(request, setup_loader_modules) as loader_mock:
yield loader_mock
@pytest.mark.parametrize("verbose", [True, False])
def test_when_no_users_returned_no_data_should_be_returned(verbose):
expected_users = {} if verbose else []
with patch.dict(
pdbedit.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"stdout": "", "stderr": "", "retcode": 0}
)
},
):
actual_users = pdbedit.list_users(verbose=verbose)
assert actual_users == expected_users
def test_when_verbose_and_retcode_is_nonzero_output_should_be_had():
expected_stderr = "this is something fnord"
with patch.dict(
pdbedit.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"stdout": "", "stderr": expected_stderr, "retcode": 1}
)
},
), patch("salt.modules.pdbedit.log.error", autospec=True) as fake_error_log:
pdbedit.list_users(verbose=True)
actual_error = fake_error_log.mock_calls[0].args[0]
assert actual_error == expected_stderr
def test_when_verbose_and_single_good_output_expected_data_should_be_parsed():
expected_data = {
"roscivs": {
"unix username": "roscivs",
"nt username": "bottia",
"full name": "Roscivs Bottia",
"user sid": "42",
"primary group sid": "99",
"home directory": r"\\samba\roscivs",
"account desc": "separators! xxx so long and thanks for all the fish",
"logoff time": "Sat, 14 Aug 2010 15:06:39 UTC",
"kickoff time": "Sat, 14 Aug 2010 15:06:39 UTC",
"password must change": "never",
}
}
pdb_output = dedent(
r"""
Unix username: roscivs
NT username: bottia
User SID: 42
Primary Group SID: 99
Full Name: Roscivs Bottia
Home Directory: \\samba\roscivs
Account desc: separators! xxx so long and thanks for all the fish
Logoff time: Sat, 14 Aug 2010 15:06:39 UTC
Kickoff time: Sat, 14 Aug 2010 15:06:39 UTC
Password must change: never
"""
).strip()
with patch.dict(
pdbedit.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"stdout": pdb_output, "stderr": "", "retcode": 0}
)
},
):
actual_data = pdbedit.list_users(verbose=True)
assert actual_data == expected_data
def test_when_verbose_and_multiple_records_present_data_should_be_correctly_parsed():
expected_data = {
"roscivs": {
"unix username": "roscivs",
"nt username": "bottia",
"user sid": "42",
},
"srilyk": {
"unix username": "srilyk",
"nt username": "srilyk",
"account desc": "trololollol",
"user sid": "99",
},
"jewlz": {
"unix username": "jewlz",
"nt username": "flutterbies",
"user sid": "4",
},
}
pdb_output = dedent(
"""
-------------
Unix username: roscivs
NT username: bottia
User SID: 42
-------------
Unix username: srilyk
NT username: srilyk
User SID: 99
Account desc: trololol\x1dlol
-------------
Unix username: jewlz
NT username: flutterbies
User SID: 4
-------------
-------------
-------------
"""
).strip()
with patch.dict(
pdbedit.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"stdout": pdb_output, "stderr": "", "retcode": 0}
)
},
):
actual_data = pdbedit.list_users(verbose=True)
assert actual_data == expected_data
| saltstack/salt | tests/pytests/unit/modules/test_pdbedit.py | Python | apache-2.0 | 4,278 |
#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.tools import *
from leela.client.sensors.linux import disk_usage
def test_disk_usage_sensor_is_stateless():
sensor = disk_usage.DiskUsage()
ok_([] != sensor.measure())
def test_disk_usage_sensor_produces_core_metrics():
sensor = disk_usage.DiskUsage()
events = [e.name() for e in sensor.measure()]
ok_(reduce(lambda acc, e: acc or e.endswith(".total"), events, False))
ok_(reduce(lambda acc, e: acc or e.endswith(".used"), events, False))
ok_(reduce(lambda acc, e: acc or e.endswith(".free"), events, False))
| locaweb/leela-client | try/python/test_leela/test_client/test_sensors/test_linux/test_disk_usage.py | Python | apache-2.0 | 1,191 |
# The content of this file was generated using the Python profile of libCellML 0.2.0.
from enum import Enum
from math import *
__version__ = "0.3.0"
LIBCELLML_VERSION = "0.2.0"
STATE_COUNT = 4
VARIABLE_COUNT = 18
class VariableType(Enum):
VARIABLE_OF_INTEGRATION = 1
STATE = 2
CONSTANT = 3
COMPUTED_CONSTANT = 4
ALGEBRAIC = 5
EXTERNAL = 6
VOI_INFO = {"name": "time", "units": "millisecond", "component": "environment", "type": VariableType.VARIABLE_OF_INTEGRATION}
STATE_INFO = [
{"name": "m", "units": "dimensionless", "component": "sodium_channel_m_gate", "type": VariableType.STATE},
{"name": "h", "units": "dimensionless", "component": "sodium_channel_h_gate", "type": VariableType.STATE},
{"name": "n", "units": "dimensionless", "component": "potassium_channel_n_gate", "type": VariableType.STATE},
{"name": "V", "units": "millivolt", "component": "membrane", "type": VariableType.STATE}
]
VARIABLE_INFO = [
{"name": "g_L", "units": "milliS_per_cm2", "component": "leakage_current", "type": VariableType.CONSTANT},
{"name": "Cm", "units": "microF_per_cm2", "component": "membrane", "type": VariableType.CONSTANT},
{"name": "E_R", "units": "millivolt", "component": "membrane", "type": VariableType.CONSTANT},
{"name": "g_K", "units": "milliS_per_cm2", "component": "potassium_channel", "type": VariableType.CONSTANT},
{"name": "g_Na", "units": "milliS_per_cm2", "component": "sodium_channel", "type": VariableType.CONSTANT},
{"name": "i_Stim", "units": "microA_per_cm2", "component": "membrane", "type": VariableType.ALGEBRAIC},
{"name": "E_L", "units": "millivolt", "component": "leakage_current", "type": VariableType.EXTERNAL},
{"name": "i_L", "units": "microA_per_cm2", "component": "leakage_current", "type": VariableType.ALGEBRAIC},
{"name": "E_Na", "units": "millivolt", "component": "sodium_channel", "type": VariableType.COMPUTED_CONSTANT},
{"name": "i_Na", "units": "microA_per_cm2", "component": "sodium_channel", "type": VariableType.ALGEBRAIC},
{"name": "alpha_m", "units": "per_millisecond", "component": "sodium_channel_m_gate", "type": VariableType.ALGEBRAIC},
{"name": "beta_m", "units": "per_millisecond", "component": "sodium_channel_m_gate", "type": VariableType.ALGEBRAIC},
{"name": "alpha_h", "units": "per_millisecond", "component": "sodium_channel_h_gate", "type": VariableType.ALGEBRAIC},
{"name": "beta_h", "units": "per_millisecond", "component": "sodium_channel_h_gate", "type": VariableType.ALGEBRAIC},
{"name": "E_K", "units": "millivolt", "component": "potassium_channel", "type": VariableType.COMPUTED_CONSTANT},
{"name": "i_K", "units": "microA_per_cm2", "component": "potassium_channel", "type": VariableType.ALGEBRAIC},
{"name": "alpha_n", "units": "per_millisecond", "component": "potassium_channel_n_gate", "type": VariableType.ALGEBRAIC},
{"name": "beta_n", "units": "per_millisecond", "component": "potassium_channel_n_gate", "type": VariableType.ALGEBRAIC}
]
def leq_func(x, y):
return 1.0 if x <= y else 0.0
def geq_func(x, y):
return 1.0 if x >= y else 0.0
def and_func(x, y):
return 1.0 if bool(x) & bool(y) else 0.0
def create_states_array():
return [nan]*STATE_COUNT
def create_variables_array():
return [nan]*VARIABLE_COUNT
def initialise_states_and_constants(states, variables):
variables[0] = 0.3
variables[1] = 1.0
variables[2] = 0.0
variables[3] = 36.0
variables[4] = 120.0
states[0] = 0.05
states[1] = 0.6
states[2] = 0.325
states[3] = 0.0
def compute_computed_constants(variables):
variables[8] = variables[2]-115.0
variables[14] = variables[2]+12.0
def compute_rates(voi, states, rates, variables, external_variable):
variables[10] = 0.1*(states[3]+25.0)/(exp((states[3]+25.0)/10.0)-1.0)
variables[11] = 4.0*exp(states[3]/18.0)
rates[0] = variables[10]*(1.0-states[0])-variables[11]*states[0]
variables[12] = 0.07*exp(states[3]/20.0)
variables[13] = 1.0/(exp((states[3]+30.0)/10.0)+1.0)
rates[1] = variables[12]*(1.0-states[1])-variables[13]*states[1]
variables[16] = 0.01*(states[3]+10.0)/(exp((states[3]+10.0)/10.0)-1.0)
variables[17] = 0.125*exp(states[3]/80.0)
rates[2] = variables[16]*(1.0-states[2])-variables[17]*states[2]
variables[5] = -20.0 if and_func(geq_func(voi, 10.0), leq_func(voi, 10.5)) else 0.0
variables[6] = external_variable(voi, states, rates, variables, 6)
variables[7] = variables[0]*(states[3]-variables[6])
variables[15] = variables[3]*pow(states[2], 4.0)*(states[3]-variables[14])
variables[9] = variables[4]*pow(states[0], 3.0)*states[1]*(states[3]-variables[8])
rates[3] = -(-variables[5]+variables[9]+variables[15]+variables[7])/variables[1]
def compute_variables(voi, states, rates, variables, external_variable):
variables[7] = variables[0]*(states[3]-variables[6])
variables[9] = variables[4]*pow(states[0], 3.0)*states[1]*(states[3]-variables[8])
variables[10] = 0.1*(states[3]+25.0)/(exp((states[3]+25.0)/10.0)-1.0)
variables[11] = 4.0*exp(states[3]/18.0)
variables[12] = 0.07*exp(states[3]/20.0)
variables[13] = 1.0/(exp((states[3]+30.0)/10.0)+1.0)
variables[15] = variables[3]*pow(states[2], 4.0)*(states[3]-variables[14])
variables[16] = 0.01*(states[3]+10.0)/(exp((states[3]+10.0)/10.0)-1.0)
variables[17] = 0.125*exp(states[3]/80.0)
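# Illustrative driver sketch (an assumption, not produced by the generator): shows
# how the functions above might be wired together with a forward-Euler step and a
# constant external variable supplying E_L (index 6 in VARIABLE_INFO). The step
# size, step count and E_L value are arbitrary placeholders.
def _example_euler_run(step=0.01, n_steps=10):
    def external_variable(voi, states, rates, variables, index):
        return 10.613  # hypothetical constant for E_L (millivolt)
    states = create_states_array()
    rates = create_states_array()
    variables = create_variables_array()
    initialise_states_and_constants(states, variables)
    compute_computed_constants(variables)
    voi = 0.0
    for _ in range(n_steps):
        compute_rates(voi, states, rates, variables, external_variable)
        for i in range(STATE_COUNT):
            states[i] += step*rates[i]
        voi += step
    compute_variables(voi, states, rates, variables, external_variable)
    return states, variables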
| nickerso/libcellml | tests/resources/generator/hodgkin_huxley_squid_axon_model_1952/model.computed.constant.py | Python | apache-2.0 | 5,432 |
# coding=utf-8
from setuptools import setup
from setuptools.command.test import test
class TestHook(test):
def run_tests(self):
import nose
nose.main(argv=['nosetests', 'tests/', '-v', '--logging-clear-handlers'])
setup(
name='lxml-asserts',
version='0.1.2',
description='Handy functions for testing lxml etree objects for equality and compatibility',
url='https://github.com/SuminAndrew/lxml-asserts',
author='Andrew Sumin',
author_email='[email protected]',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Testing',
],
license="http://www.apache.org/licenses/LICENSE-2.0",
cmdclass={
'test': TestHook
},
packages=[
'lxml_asserts'
],
install_requires=[
'lxml',
],
test_suite='tests',
tests_require=[
'nose',
'pycodestyle == 2.3.1'
],
zip_safe=False
)
| SuminAndrew/lxml-asserts | setup.py | Python | apache-2.0 | 1,329 |
"""
Initialization script for restapi for the application.
"""
from flask import Blueprint
from app.common.logging import setup_logging
api = Blueprint('api', __name__)
# Setup logger
# api_log = setup_logging(__name__, 'logs/api.log', maxFilesize=1000000,
# backup_count=5)
from . import views, errors
| ibbad/dna-lceb-web | app/api_v1_0/__init__.py | Python | apache-2.0 | 331 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteInstance
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-appengine-admin
# [START appengine_v1_generated_Instances_DeleteInstance_sync]
from google.cloud import appengine_admin_v1
def sample_delete_instance():
# Create a client
client = appengine_admin_v1.InstancesClient()
# Initialize request argument(s)
request = appengine_admin_v1.DeleteInstanceRequest(
)
# Make the request
operation = client.delete_instance(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END appengine_v1_generated_Instances_DeleteInstance_sync]
| googleapis/python-appengine-admin | samples/generated_samples/appengine_v1_generated_instances_delete_instance_sync.py | Python | apache-2.0 | 1,530 |
# python3
class HeapBuilder:
def __init__(self):
self._swaps = []
self._data = []
def ReadData(self):
n = int(input())
self._data = [int(s) for s in input().split()]
assert n == len(self._data)
def WriteResponse(self):
print(len(self._swaps))
for swap in self._swaps:
print(swap[0], swap[1])
def GenerateSwaps(self):
# The following naive implementation just sorts
# the given sequence using selection sort algorithm
# and saves the resulting sequence of swaps.
# This turns the given array into a heap,
# but in the worst case gives a quadratic number of swaps.
#
# TODO: replace by a more efficient implementation
for i in range(len(self._data)):
for j in range(i + 1, len(self._data)):
if self._data[i] > self._data[j]:
self._swaps.append((i, j))
self._data[i], self._data[j] = self._data[j], self._data[i]
def Solve(self):
self.ReadData()
self.GenerateSwaps()
self.WriteResponse()
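# Hedged alternative sketch (an illustration of the TODO above; HeapBuilder.Solve
# still uses the naive quadratic method): builds a min-heap in O(n) by sifting
# down from the last internal node, mutating `data` in place and returning the
# recorded swaps.
def build_heap_swaps_sift_down(data):
  swaps = []
  n = len(data)
  for start in range(n // 2 - 1, -1, -1):
    i = start
    while True:
      smallest = i
      left, right = 2 * i + 1, 2 * i + 2
      if left < n and data[left] < data[smallest]:
        smallest = left
      if right < n and data[right] < data[smallest]:
        smallest = right
      if smallest == i:
        break
      swaps.append((i, smallest))
      data[i], data[smallest] = data[smallest], data[i]
      i = smallest
  return swaps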
if __name__ == '__main__':
heap_builder = HeapBuilder()
heap_builder.Solve()
| xunilrj/sandbox | courses/coursera-sandiego-algorithms/data-structures/assignment002/make_heap/build_heap.py | Python | apache-2.0 | 1,100 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class that is responsible for building and assessing proposed.
bonding patterns.
"""
import operator
from typing import List, Optional
import numpy as np
from smu import dataset_pb2
from smu.parser import smu_utils_lib
class MatchingParameters:
"""A class to specify optional matching parameters for SmuMolecule.place_bonds."""
def __init__(self):
self._must_match_all_bonds: bool = True
self._smiles_with_h: bool = False
self._smiles_with_labels: bool = True
# A variant on matching is to consider all N and O as neutral forms during
# matching, and then as a post processing step, see whether a valid,
# neutral, molecule can be formed.
self._neutral_forms_during_bond_matching: bool = False
# If not a bond is being considered during matching.
self._consider_not_bonded = False
# Avoid destroying rings if not bonded is enabled.
# Note that only the ring atom count is considered.
self._ring_atom_count_cannot_decrease = True
@property
def must_match_all_bonds(self):
return self._must_match_all_bonds
@must_match_all_bonds.setter
def must_match_all_bonds(self, value):
self._must_match_all_bonds = value
@property
def smiles_with_h(self):
return self._smiles_with_h
@smiles_with_h.setter
def smiles_with_h(self, value):
self._smiles_with_h = value
@property
def smiles_with_labels(self):
return self._smiles_with_labels
@smiles_with_labels.setter
def smiles_with_labels(self, value):
self._smiles_with_labels = value
@property
def neutral_forms_during_bond_matching(self):
return self._neutral_forms_during_bond_matching
@neutral_forms_during_bond_matching.setter
def neutral_forms_during_bond_matching(self, value):
self._neutral_forms_during_bond_matching = value
@property
def consider_not_bonded(self):
return self._consider_not_bonded
@consider_not_bonded.setter
def consider_not_bonded(self, value):
self._consider_not_bonded = value
@property
def ring_atom_count_cannot_decrease(self):
return self._ring_atom_count_cannot_decrease
@ring_atom_count_cannot_decrease.setter
def ring_atom_count_cannot_decrease(self, value):
self._ring_atom_count_cannot_decrease = value
def add_bond(a1, a2, btype, destination):
"""Add a new Bond to `destination`.
Args:
a1: atom
a2: atom
btype: bond type.
destination:
"""
destination.bonds.append(
dataset_pb2.BondTopology.Bond(
atom_a=a1,
atom_b=a2,
bond_type=smu_utils_lib.INTEGER_TO_BOND_TYPE[btype]))
class SmuMolecule:
"""Holds information about partially built molecules."""
def __init__(self, hydrogens_attached, bonds_to_scores, matching_parameters):
"""Class to perform bonding assessments.
Args:
hydrogens_attached: a BondTopology that has all atoms, and the bonds
associated with the Hydrogen atoms.
bonds_to_scores: A dict that maps tuples of pairs of atoms, to a numpy
array of scores [0,3], for each possible bond type.
matching_parameters: contains possible optional behaviour modifiers.
Returns:
"""
self._starting_bond_topology = hydrogens_attached
self._natoms = len(hydrogens_attached.atoms)
self._heavy_atoms = sum(1 for atom in hydrogens_attached.atoms
if atom != dataset_pb2.BondTopology.ATOM_H)
self._contains_both_oxygen_and_nitrogen = False
# If the molecule contains both N and O atoms, then we can
# do more extensive atom type matching if requested.
if matching_parameters.neutral_forms_during_bond_matching:
self.set_contains_both_oxygen_and_nitrogen(hydrogens_attached)
# For each atom, the maximum number of bonds that can be attached.
self._max_bonds = np.zeros(self._natoms, dtype=np.int32)
if matching_parameters.neutral_forms_during_bond_matching and self._contains_both_oxygen_and_nitrogen:
for i in range(0, self._natoms):
self._max_bonds[i] = smu_utils_lib.ATOM_TYPE_TO_MAX_BONDS_ANY_FORM[
hydrogens_attached.atoms[i]]
else:
for i in range(0, self._natoms):
self._max_bonds[i] = smu_utils_lib.ATOM_TYPE_TO_MAX_BONDS[
hydrogens_attached.atoms[i]]
# With the Hydrogens attached, the number of bonds to each atom.
self._bonds_with_hydrogens_attached = np.zeros((self._natoms),
dtype=np.int32)
for bond in hydrogens_attached.bonds:
self._bonds_with_hydrogens_attached[bond.atom_a] += 1
self._bonds_with_hydrogens_attached[bond.atom_b] += 1
self._current_bonds_attached = np.zeros((self._natoms), dtype=np.int32)
# We turn bonds_to_scores into two arrays. So they can be iterated
# via itertools.
self._bonds = list(bonds_to_scores.keys())
self._scores = list(bonds_to_scores.values())
# Initialize for probability type accumulation
self._initial_score = 1.0
self._accumulate_score = operator.mul
# For testing, it can be convenient to allow for partial matches
# For example this allows matching C-C and C=C without the need
# to add explicit hydrogens
self._must_match_all_bonds = matching_parameters.must_match_all_bonds
def set_contains_both_oxygen_and_nitrogen(self, bt):
"""Examine `bt` and set self._contains_both_oxygen_and_nitrogen.
Args:
bt: BondTopology
"""
self._contains_both_oxygen_and_nitrogen = False
oxygen_count = 0
nitrogen_count = 0
for atom in bt.atoms:
if atom in [
dataset_pb2.BondTopology.ATOM_N, dataset_pb2.BondTopology.ATOM_NPOS
]:
nitrogen_count += 1
elif atom in [
dataset_pb2.BondTopology.ATOM_O, dataset_pb2.BondTopology.ATOM_ONEG
]:
oxygen_count += 1
if oxygen_count > 0 and nitrogen_count > 0:
self._contains_both_oxygen_and_nitrogen = True
def set_initial_score_and_incrementer(self, initial_score, op):
"""Update values used for computing scores."""
self._initial_score = initial_score
self._accumulate_score = op
def _initialize(self):
"""Make the molecule ready for adding bonds between heavy atoms."""
self._current_bonds_attached = np.copy(self._bonds_with_hydrogens_attached)
def _place_bond(self, a1, a2, btype):
"""Possibly add a new bond to the current config.
If the bond can be placed, updates self._current_bonds_attached for
    both `a1` and `a2`.
Args:
a1:
a2:
btype:
Returns:
Bool.
"""
if self._current_bonds_attached[a1] + btype > self._max_bonds[a1]:
return False
if self._current_bonds_attached[a2] + btype > self._max_bonds[a2]:
return False
self._current_bonds_attached[a1] += btype
self._current_bonds_attached[a2] += btype
return True
def generate_search_state(self):
"""For each pair of atoms, return a list of plausible bond types.
This will be passed to itertools.product, which thereby enumerates all
possible bonding combinations.
Args:
Returns:
List of lists - one for each atom pair.
"""
result: List[List[int]] = []
for ndx in range(0, len(self._bonds)):
# For each pair of atoms, the plausible bond types - non zero score.
plausible_types: List[int] = []
for i, score in enumerate(self._scores[ndx]):
if score > 0.0:
plausible_types.append(i)
result.append(plausible_types)
return result
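  # Illustrative caller-side sketch (assumed usage pieced together from the
  # docstrings above; the real driver code lives outside this class): enumerate
  # every candidate bonding pattern and keep the ones place_bonds() accepts.
  #
  #   import itertools
  #   search_space = mol.generate_search_state()
  #   for state in itertools.product(*search_space):
  #     bt = mol.place_bonds(state, matching_parameters)
  #     if bt is not None:
  #       ...  # record bt and bt.score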
def place_bonds_inner(self, state):
"""Place bonds corresponding to `state`.
No validity checking is done, the calling function is responsible
for that.
Args:
state: for each pair of atoms, the kind of bond to be placed.
Returns:
If successful, a BondTopology.
"""
self._current_bonds_attached = np.copy(self._bonds_with_hydrogens_attached)
result = dataset_pb2.BondTopology()
result.CopyFrom(self._starting_bond_topology) # only Hydrogens attached.
result.score = self._initial_score
    # Make sure each atom gets at least one bond
atom_got_bond = np.zeros(self._heavy_atoms)
for i, btype in enumerate(state):
if btype != dataset_pb2.BondTopology.BOND_UNDEFINED:
a1 = self._bonds[i][0]
a2 = self._bonds[i][1]
if not self._place_bond(a1, a2, btype):
return None
add_bond(a1, a2, btype, result)
atom_got_bond[a1] = 1
atom_got_bond[a2] = 1
result.score = self._accumulate_score(result.score,
self._scores[i][btype])
if not np.all(atom_got_bond):
return None
return result
def place_bonds(
self, state, matching_parameters
):
"""Place bonds corresponding to `state`.
Args:
state: bonding pattern to be placed.
matching_parameters: optional settings
Returns:
If successful, a BondTopology
"""
bt = self.place_bonds_inner(state)
if not bt:
return None
if matching_parameters.neutral_forms_during_bond_matching and self._contains_both_oxygen_and_nitrogen:
if not self.assign_charged_atoms(bt):
return None
# all bonds matched has already been checked.
return bt
# Optionally check whether all bonds have been matched
if not self._must_match_all_bonds:
return bt
if not np.array_equal(self._current_bonds_attached, self._max_bonds):
return None
return bt
def assign_charged_atoms(self, bt):
"""Assign (N, N+) and (O, O-) possibilities in `bt`.
bt must contain both N and O atoms.
Note that we assume _must_match_all_bonds, and return None if that cannot
be achieved.
Args:
bt: BondTopology, bt.atoms are updated in place
Returns:
True if successful, False otherwise
"""
carbon = dataset_pb2.BondTopology.ATOM_C
hydrogen = dataset_pb2.BondTopology.ATOM_H
fluorine = dataset_pb2.BondTopology.ATOM_F
nitrogen = dataset_pb2.BondTopology.ATOM_N
npos = dataset_pb2.BondTopology.ATOM_NPOS
oxygen = dataset_pb2.BondTopology.ATOM_O
oneg = dataset_pb2.BondTopology.ATOM_ONEG
net_charge = 0
for i, atom in enumerate(bt.atoms):
if atom in [carbon, hydrogen, fluorine]:
if self._max_bonds[i] != self._current_bonds_attached[i]:
return False
elif atom in [nitrogen, npos]:
if self._current_bonds_attached[i] == 4:
bt.atoms[i] = npos
net_charge += 1
elif self._current_bonds_attached[i] == 3:
bt.atoms[i] = nitrogen
else:
return False
elif atom in [oxygen, oneg]:
if self._current_bonds_attached[i] == 2:
bt.atoms[i] = oxygen
elif self._current_bonds_attached[i] == 1:
bt.atoms[i] = oneg
net_charge -= 1
else: # not attached.
return False
if net_charge != 0:
return False
return True
| google-research/google-research | smu/geometry/smu_molecule.py | Python | apache-2.0 | 12,148 |
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
import sys, os
from os.path import abspath, dirname, join
from warnings import warn
GRAPHITE_WEB_APP_SETTINGS_LOADED = False
WEBAPP_VERSION = '0.10.0-alpha'
DEBUG = False
JAVASCRIPT_DEBUG = False
# Filesystem layout
WEB_DIR = dirname( abspath(__file__) )
WEBAPP_DIR = dirname(WEB_DIR)
GRAPHITE_ROOT = dirname(WEBAPP_DIR)
# Initialize additional path variables
# Defaults for these are set after local_settings is imported
CONTENT_DIR = ''
CSS_DIR = ''
CONF_DIR = ''
DASHBOARD_CONF = ''
GRAPHTEMPLATES_CONF = ''
STORAGE_DIR = ''
WHITELIST_FILE = ''
INDEX_FILE = ''
LOG_DIR = ''
CERES_DIR = ''
WHISPER_DIR = ''
RRD_DIR = ''
STANDARD_DIRS = []
CLUSTER_SERVERS = []
# Cluster settings
CLUSTER_SERVERS = []
REMOTE_FIND_TIMEOUT = 3.0
REMOTE_FETCH_TIMEOUT = 6.0
REMOTE_RETRY_DELAY = 60.0
REMOTE_READER_CACHE_SIZE_LIMIT = 1000
CARBON_METRIC_PREFIX='carbon'
CARBONLINK_HOSTS = ["127.0.0.1:7002"]
CARBONLINK_TIMEOUT = 1.0
CARBONLINK_HASHING_KEYFUNC = None
CARBONLINK_RETRY_DELAY = 15
REPLICATION_FACTOR = 1
MEMCACHE_HOSTS = []
MEMCACHE_KEY_PREFIX = ''
FIND_CACHE_DURATION = 300
FIND_TOLERANCE = 2 * FIND_CACHE_DURATION
DEFAULT_CACHE_DURATION = 60 #metric data and graphs are cached for one minute by default
LOG_CACHE_PERFORMANCE = False
LOG_ROTATE = True
MAX_FETCH_RETRIES = 2
#Remote rendering settings
REMOTE_RENDERING = False #if True, rendering is delegated to RENDERING_HOSTS
RENDERING_HOSTS = []
REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
LOG_RENDERING_PERFORMANCE = False
#Miscellaneous settings
SMTP_SERVER = "localhost"
DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
ALLOW_ANONYMOUS_CLI = True
LOG_METRIC_ACCESS = False
LEGEND_MAX_ITEMS = 10
RRD_CF = 'AVERAGE'
STORAGE_FINDERS = (
'graphite.finders.standard.StandardFinder',
)
#Authentication settings
USE_LDAP_AUTH = False
LDAP_SERVER = "" # "ldapserver.mydomain.com"
LDAP_PORT = 389
LDAP_USE_TLS = False
LDAP_SEARCH_BASE = "" # "OU=users,DC=mydomain,DC=com"
LDAP_BASE_USER = "" # "CN=some_readonly_account,DC=mydomain,DC=com"
LDAP_BASE_PASS = "" # "my_password"
LDAP_USER_QUERY = "" # "(username=%s)" For Active Directory use "(sAMAccountName=%s)"
LDAP_URI = None
#Set this to True to delegate authentication to the web server
USE_REMOTE_USER_AUTHENTICATION = False
# Django 1.5 requires this so we set a default but warn the user
SECRET_KEY = 'UNSAFE_DEFAULT'
# Django 1.5 requires this to be set. Here we default to prior behavior and allow all
ALLOWED_HOSTS = [ '*' ]
# Override to link a different URL for login (e.g. for django_openid_auth)
LOGIN_URL = '/account/login'
# Set to True to require authentication to save or delete dashboards
DASHBOARD_REQUIRE_AUTHENTICATION = False
# Require Django change/delete permissions to save or delete dashboards.
# NOTE: Requires DASHBOARD_REQUIRE_AUTHENTICATION to be set
DASHBOARD_REQUIRE_PERMISSIONS = False
# Name of a group to which the user must belong to save or delete dashboards. Alternative to
# DASHBOARD_REQUIRE_PERMISSIONS, particularly useful when using only LDAP (without Admin app)
# NOTE: Requires DASHBOARD_REQUIRE_AUTHENTICATION to be set
DASHBOARD_REQUIRE_EDIT_GROUP = None
DATABASES = {
'default': {
'NAME': '/opt/graphite/storage/graphite.db',
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
}
# If using rrdcached, set to the address or socket of the daemon
FLUSHRRDCACHED = ''
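# Hedged example (illustrative only, not a default): rrdcached is typically
# addressed either by a UNIX socket path or a host:port pair, e.g.
#   FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'
#   FLUSHRRDCACHED = 'rrdcached.example.com:42217'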
## Load our local_settings
try:
from graphite.local_settings import *
except ImportError:
print >> sys.stderr, "Could not import graphite.local_settings, using defaults!"
## Load Django settings if they werent picked up in local_settings
if not GRAPHITE_WEB_APP_SETTINGS_LOADED:
from graphite.app_settings import *
## Set config dependent on flags set in local_settings
# Path configuration
if not CONTENT_DIR:
CONTENT_DIR = join(WEBAPP_DIR, 'content')
if not CSS_DIR:
CSS_DIR = join(CONTENT_DIR, 'css')
if not CONF_DIR:
CONF_DIR = os.environ.get('GRAPHITE_CONF_DIR', join(GRAPHITE_ROOT, 'conf'))
if not DASHBOARD_CONF:
DASHBOARD_CONF = join(CONF_DIR, 'dashboard.conf')
if not GRAPHTEMPLATES_CONF:
GRAPHTEMPLATES_CONF = join(CONF_DIR, 'graphTemplates.conf')
if not STORAGE_DIR:
STORAGE_DIR = os.environ.get('GRAPHITE_STORAGE_DIR', join(GRAPHITE_ROOT, 'storage'))
if not WHITELIST_FILE:
WHITELIST_FILE = join(STORAGE_DIR, 'lists', 'whitelist')
if not INDEX_FILE:
INDEX_FILE = join(STORAGE_DIR, 'index')
if not LOG_DIR:
LOG_DIR = join(STORAGE_DIR, 'log', 'webapp')
if not WHISPER_DIR:
WHISPER_DIR = join(STORAGE_DIR, 'whisper/')
if not CERES_DIR:
CERES_DIR = join(STORAGE_DIR, 'ceres/')
if not RRD_DIR:
RRD_DIR = join(STORAGE_DIR, 'rrd/')
if not STANDARD_DIRS:
try:
import whisper
if os.path.exists(WHISPER_DIR):
STANDARD_DIRS.append(WHISPER_DIR)
except ImportError:
print >> sys.stderr, "WARNING: whisper module could not be loaded, whisper support disabled"
try:
import rrdtool
if os.path.exists(RRD_DIR):
STANDARD_DIRS.append(RRD_DIR)
except ImportError:
pass
# Default sqlite db file
# This is set here so that a user-set STORAGE_DIR is available
if 'sqlite3' in DATABASES.get('default',{}).get('ENGINE','') \
and not DATABASES.get('default',{}).get('NAME'):
DATABASES['default']['NAME'] = join(STORAGE_DIR, 'graphite.db')
# Caching shortcuts
if MEMCACHE_HOSTS:
CACHES['default'] = {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': MEMCACHE_HOSTS,
'TIMEOUT': DEFAULT_CACHE_DURATION,
'KEY_PREFIX': MEMCACHE_KEY_PREFIX,
}
# Authentication shortcuts
if USE_LDAP_AUTH and LDAP_URI is None:
LDAP_URI = "ldap://%s:%d/" % (LDAP_SERVER, LDAP_PORT)
if USE_REMOTE_USER_AUTHENTICATION:
MIDDLEWARE_CLASSES += ('django.contrib.auth.middleware.RemoteUserMiddleware',)
AUTHENTICATION_BACKENDS.insert(0,'django.contrib.auth.backends.RemoteUserBackend')
if USE_LDAP_AUTH:
AUTHENTICATION_BACKENDS.insert(0,'graphite.account.ldapBackend.LDAPBackend')
if SECRET_KEY == 'UNSAFE_DEFAULT':
warn('SECRET_KEY is set to an unsafe default. This should be set in local_settings.py for better security')
| g76r/graphite-web | webapp/graphite/settings.py | Python | apache-2.0 | 6,812 |
import time
def current_millis():
return int(round(time.time() * 1000))
| bugy/rebuilder | utils/date_utils.py | Python | apache-2.0 | 78 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import openstack.common.context
from openstack.common.middleware import context
from openstack.common import test
class ContextMiddlewareTest(test.BaseTestCase):
def test_process_request(self):
req = mock.Mock()
app = mock.Mock()
options = mock.MagicMock()
ctx = mock.sentinel.context
with mock.patch.object(context.ContextMiddleware,
'make_context',
mock.Mock(return_value=ctx)):
ctx_middleware = context.ContextMiddleware(app, options)
ctx_middleware.process_request(req)
self.assertEqual(req.context, ctx)
def test_make_context(self):
app = mock.Mock()
options = mock.MagicMock()
with mock.patch.object(openstack.common.context.RequestContext,
'__init__',
mock.Mock(return_value=None)) as init:
ctx_middleware = context.ContextMiddleware(app, options)
ctx_middleware.make_context(mock.sentinel.arg)
init.assert_called_with(mock.sentinel.arg)
def test_make_explicit_context(self):
app = mock.Mock()
import_class = mock.Mock()
options = {'context_class': mock.sentinel.context_class}
with mock.patch('openstack.common.importutils.import_class',
mock.Mock(return_value=import_class)):
ctx_middleware = context.ContextMiddleware(app, options)
ctx_middleware.make_context(mock.sentinel.arg)
import_class.assert_called_with(mock.sentinel.arg)
class FilterFactoryTest(test.BaseTestCase):
def test_filter_factory(self):
global_conf = dict(sentinel=mock.sentinel.global_conf)
app = mock.sentinel.app
target = 'openstack.common.middleware.context.ContextMiddleware'
def check_ctx_middleware(arg_app, arg_conf):
self.assertEqual(app, arg_app)
self.assertEqual(global_conf['sentinel'], arg_conf['sentinel'])
return mock.DEFAULT
with mock.patch(target,
mock.Mock(return_value=mock.sentinel.ctx)) as mid:
mid.side_effect = check_ctx_middleware
filter = context.filter_factory(global_conf)
self.assertEqual(filter(app), mock.sentinel.ctx)
| JioCloud/oslo-incubator | tests/unit/middleware/test_context.py | Python | apache-2.0 | 3,034 |
import unittest
import config_test
from backupcmd.commands import backupCommands
class BackupCommandsTestCase(unittest.TestCase):
"""Test commands passed to main script"""
def test_hyphen_r_option(self):
print 'Pending BackupCommandsTestCase'
self.assertEqual(1,1)
| ChinaNetCloud/nc-backup-py | nc-backup-py/tests/backup_main_commands_test.py | Python | apache-2.0 | 293 |
#! /usr/bin/env python
"""
Simulate DSR over a network of nodes.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-10-26 21:51:40 -0500 (Wed, 26 Oct 2011) $
* $LastChangedRevision: 5314 $
:author: Ketan Mandke <[email protected]>
:copyright:
Copyright 2009-2011 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
from SimPy.Simulation import *
from scapy.all import *
from wins import *
from wins.ieee80211 import *
from copy import copy, deepcopy
from wins.backend import RNG_init
from wins.backend import *
from wins.mac import RBAR, ARF
from wins.net import DSR
from wins.traffic import Agent
import sys
from optparse import OptionParser
import numpy as np
import struct
import gc
import time
RNG_INIT = 1
EXIT_WITH_TRACE = 1
class Node(Element):
name = "node"
tracename = "NODE"
def __init__(self, **kwargs):
Element.__init__(self, **kwargs)
def configure(self, pos=None, # motion \
useshared=False, # arp \
cfocorrection=True, # phy \
usecsma=False, # mac \
rreqrate=None, datarate=None, # net \
dest=None, plen=None, delay=None, mode=None, # agent \
**kwargs):
cif = self.newchild('cif', Dot11NRadio)
phy = self.newchild('phy', Dot11NPHY, radio=cif, cfocorrection=cfocorrection)
mac = self.newchild('mac', DCF, usecsma=usecsma, phy=phy)
net = self.newchild('net', DSR, rreqrate=rreqrate, datarate=datarate)
arp = self.newchild('arp', ARP, useshared=useshared)
agt = self.newchild('agent', Agent, dest=dest, plen=plen, \
delay=delay, mode=mode)
mobi = self.newchild('motion', Motion, pos=pos)
# connect ports
agt.connect(net)
arp.connect(net, mac)
mac.connect(phy)
phy.connect(cif)
def read_topo(options, topofile):
"""Read topology layout from file."""
f = file(topofile, 'r')
s = f.readline()
topo = {'border':None, 'layout':None}
done = not (s)
while not done:
# convert s to dict (check for border and layout)
try:
d = eval(s)
assert isinstance(d, dict)
assert ('border' in d) and ('layout' in d)
except:
d = None
# add dict to topo
if d: topo.update(d)
# get next input
s = f.readline()
done = not(s)
f.close()
return topo
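# Expected topofile format (matching what --save-topo writes below): one Python dict
# literal per line, e.g.
#   {'border': (0.0, 500.0, 0.0, 500.0), 'layout': [(12.3, 45.6), (78.9, 10.1)]}
# where 'border' is (xmin, xmax, ymin, ymax) and 'layout' lists node (x, y) positions.
# (Coordinate values above are illustrative.)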
def read_route(options, routefile):
"""Read routing tables from file."""
f = file(routefile, 'r')
s = f.readline()
routedata = {}
done = not (s)
while not done:
# convert s to dict
try:
d = eval(s)
assert isinstance(d, dict)
for x,y in d.items():
# maps src x -> routing table y
assert isinstance(y, dict)
for a,b in y.items():
# maps dst a -> info b (for route table y)
assert ('index' in b)
assert ('list' in b)
except:
d = None
# add dict to routedata
if d: routedata.update(d)
# get next input
s = f.readline()
done = not(s)
f.close()
return routedata
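# Expected routefile format (matching what --save-route writes below): one dict
# literal per line mapping a source address to its routing table, where each table
# maps a destination to {'index': ..., 'list': [(cost, timestamp, nexthop), ...]};
# the (cost, timestamp, nexthop) ordering matches the unpacking in set_routing().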
def get_topology(options, numnodes):
"""Get/create topology."""
# load topology from file
if options.usetopo:
topofile = options.usetopo
topo = read_topo(options, topofile)
border = topo['border']
layout = topo['layout']
xmin, xmax, ymin, ymax = border[:4]
assert (len(layout)>=numnodes)
return topo
# create new topology
assert (options.xmin<=options.xmax)
assert (options.ymin<=options.ymax)
xmin, xmax = options.xmin, options.xmax
ymin, ymax = options.ymin, options.ymax
border = (xmin, xmax, ymin, ymax)
# use uniform distribution for layout
xpos = np.random.uniform(xmin, xmax, numnodes)
ypos = np.random.uniform(ymin, ymax, numnodes)
layout = [(xpos[k],ypos[k]) for k in range(numnodes)]
# verify layout parameters
assert (len(layout)>=numnodes)
topo = {'border':border, 'layout':layout}
return topo
def set_routing(options, nodelist):
"""Set routing tables if needed."""
if not options.useroute: return
routefile = options.useroute
rdata = read_route(options, routefile)
for n in nodelist:
addr = n.net.address
if addr not in rdata: continue
for dst, data in rdata[addr].items():
paths = data['list']
for c,ts,nh in paths:
n.net.addroute(dst, nexthop=nh, cost=c)
return rdata
def run_experiment(options):
# record start time
starttime = time.time()
# initialize RNG
if RNG_INIT: RNG_init()
# set SIMULATION parameters
mon = Element(tracename="MON")
verbose = options.verbose
stoptime = 2.0
if not (options.stop<0): stoptime = options.stop
stoptime *= 1.05 # allow events around stoptime to finish
simargs = {'verbose':verbose}
# set EXPERIMENT parameters
ntx, nrx = 1, 1
numnodes = options.numnodes
nconnect = options.nconnect
assert (nconnect>0)
assert (numnodes>=2*nconnect)
# set CHANNEL parameters
alpha = options.alpha
modeltype = options.tgnmodel # default -> LOS Channel
usedoppler = options.usedoppler
usefading = options.usefading
envspeed = options.envspeed
chargs = {'modeltype':modeltype, 'n':alpha, \
'usedoppler':usedoppler, 'usefading':usefading, \
'environmentspeed': envspeed}
chargs.update(simargs)
# set AGENT parameters
mode = options.agent_mode
plen = Agent.DefaultPacketLength
rate = options.rate # transmission rate in packets/second
delay = None
if mode is None: mode = "cbr"
if options.plen>0: plen = options.plen
if (rate>0): delay = 1.0/rate
# set agent delay if not already specified
if delay is None:
cm = Dot11NChannel(**chargs)
chan = Dot11N_Channel(cm.modelnum, nrx, ntx, cm.flags)
delay = 2*chan.coherencetime()
if rate is None: rate = 1.0/delay
agtargs = {'plen': plen, 'mode':mode, 'delay':delay}
# set DSR parameters
rreqrate, datarate = None, None
if 0<=options.rreqrate<8*ntx: rreqrate=options.rreqrate
if 0<=options.datarate<8*ntx: datarate=options.datarate
netargs = {'rreqrate':rreqrate, 'datarate':datarate}
# set other protocol parameters (MAC, ARP, etc.)
useshared = True
arpargs = {'useshared':useshared}
usecsma = False
macargs = {'usecsma':usecsma}
# set phy parameters
Dot11NPHY.usewaveform = options.usewaveform
Dot11NRadio.Ntx, Dot11NRadio.Nrx = ntx, nrx
Dot11NRadio.fomax = options.fomax
cfocorrection = True
if options.disable_cfo_correction: cfocorrection = False
phyargs = {'cfocorrection':cfocorrection}
# set node parameters
nodeargs = {}
nodeargs.update(agtargs)
nodeargs.update(netargs)
nodeargs.update(arpargs)
nodeargs.update(macargs)
nodeargs.update(phyargs)
nodeargs.update(simargs)
############################
# Set Up Simulation
############################
initialize()
# create channel
bidirectional = options.bidirectional
ch = Channel(model=Dot11NChannel, bidirectional=bidirectional, **simargs)
# get topology
topo = get_topology(options, numnodes)
border = topo['border']
layout = topo['layout']
# create nodes
nodelist = []
for k in range(numnodes):
pos = layout[k]
n = Node(pos=pos, **nodeargs)
nodelist.append(n)
n.motion.log("pos", pos=["%.3f"%(p) for p in n.motion.position] )
# connect source/destination pairs
assert (nconnect<len(nodelist))
for k in range(nconnect):
src = nodelist[k] # first N are sources
dst = nodelist[-k-1] # last N are destinations
src.agent.dest = dst.net.address
# set routing tables
set_routing(options, nodelist)
# connect all nodes via channel
for n in nodelist:
for m in nodelist:
if (n is not m):
ch.add_edge(n.cif, m.cif, **chargs)
# create monitor
if options.monitor:
mon = Monitor(period=stoptime/1e4)
mon.start()
############################
# Run Simulation
############################
if options.usetopo:
mon.log("topo", topofile=options.usetopo)
mon.log("model", **chargs)
mon.log("rate", rate="%.5g"%(rate) )
simerror = None
if EXIT_WITH_TRACE:
try:
simulate(until=stoptime)
except Exception, e:
mon.log("SIMERR", error=str(e))
simerror = e
else:
simulate(until=stoptime)
# log remaining trace information
mon.log("stoptime", stoptime="%.6f"%(stoptime))
n = gc.collect()
mon.log("GC", collected=n)
totaltime = time.time() - starttime
t = time.gmtime(totaltime)
mon.log("runtime", runtime="%02d:%02d:%02d (h/m/s)"%(t.tm_hour, t.tm_min, t.tm_sec) )
############################
# Teardown/Cleanup
############################
# print output
sys.stdout.flush()
if options.trace: ch.trace.output()
# write tracefile
if options.output is not None: ch.trace.write(options.output)
# write topofile
if options.savetopo:
f = file(options.savetopo, 'w')
f.write("%s\n"%(topo) )
f.close()
# write routefile
if options.saveroute:
# write data
f = file(options.saveroute, 'w')
for n in nodelist:
addr = n.net.address
rdata = {addr: n.net.table.data.copy()}
f.write("%s\n"%(rdata))
f.close()
# if Exception occurred during simulation ...
if simerror: raise simerror
def main():
usage = "%prog [OPTIONS]"
parser = OptionParser(usage=usage)
# simulation parameters
parser.add_option("-v", "--verbose", dest="verbose", type="int", \
default=ROUTING_VERBOSE+1, help="Set verbose level [default=%default].")
parser.add_option("-t", "--trace", dest="trace", action="store_true", \
default=False, help="Output formatted trace to stdout")
parser.add_option("-o", "--output", dest="output", \
default=None, help="Name of output file for trace")
parser.add_option("-s", "--stop", dest="stop", \
type="float", default=2.0, \
help="Run simulation until stop time [default=%default]")
parser.add_option("-m", "--monitor", dest="monitor", action="store_true", \
default=False, help="Enable simulation montior")
# experiment parameters
parser.add_option("-n", "--num-nodes", dest="numnodes", type="int", \
default=50, help="Set number of nodes [default=%default]")
parser.add_option("-c", "--num-connections", dest="nconnect", type="int", \
default=1, help="Set number of active connections [default=%default]")
# agent parameters
parser.add_option("-r", "--rate", dest="rate", type="float", \
default=None, help="Packets/second generated by a source [default=%default]")
parser.add_option("-l", "--packet-length", dest="plen", type="int", \
default=1024, help="Set packet size in bytes [default=%default]")
parser.add_option("", "--agent-mode", dest="agent_mode", \
default=None, help="Specify traffic mode [options=%s]."%(Agent.TrafficModes))
# net parameters
parser.add_option("", "--rreqrate", dest="rreqrate", type="int", \
default=None, help="Set rate index for RREQ in DSR [default=%default]")
parser.add_option("", "--datarate", dest="datarate", type="int", \
default=None, help="Set rate index for non-RREQ packets in DSR [default=%default]")
# mac parameters
# phy parameters
parser.add_option("", "--mcs", dest="mcs", type="int", \
default=0, help="Set rate index for MCS [default=%default]")
parser.add_option("", "--fomax", dest="fomax", \
type="float", default=0.0, \
help="Specify maximum frequency offset in ppm [default=%default]")
parser.add_option("", "--use-waveform", dest="usewaveform", action="store_true", \
default=False, help="Enable waveform-level simulation [default=%default]")
parser.add_option("", "--disable-cfo-correction", \
dest="disable_cfo_correction", action="store_true", \
default=False, help="Disable CFO correction in waveform-level simulation [default=%default]")
# channel parameters
parser.add_option("", "--tgn-model", dest="tgnmodel", \
default=None, help="Specify TGn model.")
parser.add_option("", "--alpha", dest="alpha", type="float", \
default=2.0, help="Specify pathloss exponent [default=%default]")
parser.add_option("", "--use-doppler", dest="usedoppler", action="store_true", \
default=False, help="Enable doppler filter for fading in TGn channel model.")
parser.add_option("", "--disable-fading", dest="usefading", action="store_false", \
default=True, help="Normalize channel and remove impact of fading on pathloss in TGn channel model.")
parser.add_option("-E", "--environment-speed", dest="envspeed", type="float", \
default=1.2, help="Environmental speed in (km/hr) [default=%default]")
parser.add_option("", "--bidirectional-channel", dest="bidirectional", action="store_true", \
default=False, help="Use bidirectional links in channel [default=%default]")
# topology/layout parameters
parser.add_option("", "--xmin", dest="xmin", type="float", \
default=0.0, help="Set x-axis left boundary [default=%default]")
parser.add_option("", "--xmax", dest="xmax", type="float", \
default=500.0, help="Set x-axis right boundary [default=%default]")
parser.add_option("", "--ymin", dest="ymin", type="float", \
default=0.0, help="Set y-axis lower boundary [default=%default]")
parser.add_option("", "--ymax", dest="ymax", type="float", \
default=500.0, help="Set y-axis upper boundary [default=%default]")
parser.add_option("", "--use-topo", dest="usetopo", \
default=None, help="Specify topology file instead of generating random topology.")
parser.add_option("", "--save-topo", dest="savetopo", \
default=None, help="Save topology to file.")
# routing parameters
parser.add_option("", "--use-route", dest="useroute", \
default=None, help="Specify routing file to initialize route tables.")
parser.add_option("", "--save-route", dest="saveroute", \
default=None, help="Save route tables to file.")
(options, args) = parser.parse_args()
if len(args)>0:
print "Invalid number of arguments."
parser.print_help()
raise SystemExit
run_experiment(options)
if __name__ == '__main__':
main()
| reidlindsay/wins | sandbox/experiments/dsr/icc/test.py | Python | apache-2.0 | 15,846 |
class Customer:
def __init__(self, firstname, lastname, country, address, postcode, city, email, phone, password):
self.firstname = firstname
self.lastname = lastname
self.country = country
self.address = address
self.postcode = postcode
self.city= city
self.email = email
self.phone = phone
self.password = password
def __repr__(self):
return "%s:%s:%s" % (self.email, self.firstname, self.lastname)
| Dob3r/python_seleniumwebdriver | Model/Customer.py | Python | apache-2.0 | 493 |
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import logging
import subprocess
import sys
import warnings
from email.mime.text import MIMEText
from email.utils import formatdate
from smtplib import SMTP
from smtplib import SMTP_SSL
from smtplib import SMTPAuthenticationError
from smtplib import SMTPException
from socket import error
import boto.sns as sns
import requests
import stomp
from exotel import Exotel
from jira.client import JIRA
from jira.exceptions import JIRAError
from requests.exceptions import RequestException
from staticconf.loader import yaml_loader
from texttable import Texttable
from twilio import TwilioRestException
from twilio.rest import TwilioRestClient
from util import EAException
from util import elastalert_logger
from util import lookup_es_key
from util import pretty_ts
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
return json.JSONEncoder.default(self, obj)
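# Typical use (as done throughout this module): json.dumps(obj, cls=DateTimeEncoder),
# so datetime values inside match dictionaries serialize as ISO 8601 strings instead
# of raising TypeError.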
class BasicMatchString(object):
""" Creates a string containing fields in match for the given rule. """
def __init__(self, rule, match):
self.rule = rule
self.match = match
def _ensure_new_line(self):
while self.text[-2:] != '\n\n':
self.text += '\n'
def _add_custom_alert_text(self):
missing = '<MISSING VALUE>'
alert_text = unicode(self.rule.get('alert_text', ''))
if 'alert_text_args' in self.rule:
alert_text_args = self.rule.get('alert_text_args')
alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args]
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
for i in xrange(len(alert_text_values)):
if alert_text_values[i] is None:
alert_value = self.rule.get(alert_text_args[i])
if alert_value:
alert_text_values[i] = alert_value
alert_text_values = [missing if val is None else val for val in alert_text_values]
alert_text = alert_text.format(*alert_text_values)
elif 'alert_text_kw' in self.rule:
kw = {}
for name, kw_name in self.rule.get('alert_text_kw').items():
val = lookup_es_key(self.match, name)
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
if val is None:
val = self.rule.get(name)
kw[kw_name] = missing if val is None else val
alert_text = alert_text.format(**kw)
self.text += alert_text
def _add_rule_text(self):
self.text += self.rule['type'].get_match_str(self.match)
def _add_top_counts(self):
for key, counts in self.match.items():
if key.startswith('top_events_'):
self.text += '%s:\n' % (key[11:])
top_events = counts.items()
if not top_events:
self.text += 'No events found.\n'
else:
top_events.sort(key=lambda x: x[1], reverse=True)
for term, count in top_events:
self.text += '%s: %s\n' % (term, count)
self.text += '\n'
def _add_match_items(self):
match_items = self.match.items()
match_items.sort(key=lambda x: x[0])
for key, value in match_items:
if key.startswith('top_events_'):
continue
value_str = unicode(value)
if type(value) in [list, dict]:
try:
value_str = self._pretty_print_as_json(value)
except TypeError:
# Non serializable object, fallback to str
pass
self.text += '%s: %s\n' % (key, value_str)
def _pretty_print_as_json(self, blob):
try:
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False)
except UnicodeDecodeError:
# This blob contains non-unicode, so lets pretend it's Latin-1 to show something
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False)
def __str__(self):
self.text = ''
if 'alert_text' not in self.rule:
self.text += self.rule['name'] + '\n\n'
self._add_custom_alert_text()
self._ensure_new_line()
if self.rule.get('alert_text_type') != 'alert_text_only':
self._add_rule_text()
self._ensure_new_line()
if self.rule.get('top_count_keys'):
self._add_top_counts()
if self.rule.get('alert_text_type') != 'exclude_fields':
self._add_match_items()
return self.text
class JiraFormattedMatchString(BasicMatchString):
def _add_match_items(self):
match_items = dict([(x, y) for x, y in self.match.items() if not x.startswith('top_events_')])
json_blob = self._pretty_print_as_json(match_items)
preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob)
self.text += preformatted_text
class Alerter(object):
""" Base class for types of alerts.
:param rule: The rule configuration.
"""
required_options = frozenset([])
def __init__(self, rule):
elastalert_logger.info("Starting up method:---alerts.__init__---")
self.rule = rule
# pipeline object is created by ElastAlerter.send_alert()
# and attached to each alerters used by a rule before calling alert()
self.pipeline = None
self.resolve_rule_references(self.rule)
def resolve_rule_references(self, root):
# Support referencing other top-level rule properties to avoid redundant copy/paste
if type(root) == list:
# Make a copy since we may be modifying the contents of the structure we're walking
for i, item in enumerate(copy.copy(root)):
if type(item) == dict or type(item) == list:
self.resolve_rule_references(root[i])
else:
root[i] = self.resolve_rule_reference(item)
elif type(root) == dict:
# Make a copy since we may be modifying the contents of the structure we're walking
for key, value in root.copy().iteritems():
if type(value) == dict or type(value) == list:
self.resolve_rule_references(root[key])
else:
root[key] = self.resolve_rule_reference(value)
def resolve_rule_reference(self, value):
strValue = unicode(value)
if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule:
if type(value) == int:
return int(self.rule[strValue[1:-1]])
else:
return self.rule[strValue[1:-1]]
else:
return value
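    # For example, a rule option whose value is the string "$name$" (hypothetical
    # option value) is replaced at load time with the rule's top-level 'name'
    # property; any '$<property>$' token naming an existing top-level property resolves.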
def alert(self, match):
""" Send an alert. Match is a dictionary of information about the alert.
:param match: A dictionary of relevant information to the alert.
"""
raise NotImplementedError()
def get_info(self):
""" Returns a dictionary of data related to this alert. At minimum, this should contain
a field type corresponding to the type of Alerter. """
return {'type': 'Unknown'}
def create_title(self, matches):
""" Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary.
:param matches: A list of dictionaries of relevant information to the alert.
"""
if 'alert_subject' in self.rule:
return self.create_custom_title(matches)
return self.create_default_title(matches)
def create_custom_title(self, matches):
alert_subject = unicode(self.rule['alert_subject'])
if 'alert_subject_args' in self.rule:
alert_subject_args = self.rule['alert_subject_args']
alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args]
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
for i in xrange(len(alert_subject_values)):
if alert_subject_values[i] is None:
alert_value = self.rule.get(alert_subject_args[i])
if alert_value:
alert_subject_values[i] = alert_value
alert_subject_values = ['<MISSING VALUE>' if val is None else val for val in alert_subject_values]
return alert_subject.format(*alert_subject_values)
return alert_subject
def create_alert_body(self, matches):
body = self.get_aggregation_summary_text(matches)
for match in matches:
body += unicode(BasicMatchString(self.rule, match))
# Separate text of aggregated alerts with dashes
if len(matches) > 1:
body += '\n----------------------------------------\n'
return body
def get_aggregation_summary_text(self, matches):
text = ''
if 'aggregation' in self.rule and 'summary_table_fields' in self.rule:
summary_table_fields = self.rule['summary_table_fields']
if not isinstance(summary_table_fields, list):
summary_table_fields = [summary_table_fields]
# Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered
summary_table_fields_with_count = summary_table_fields + ['count']
text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format(summary_table_fields_with_count)
text_table = Texttable()
text_table.header(summary_table_fields_with_count)
match_aggregation = {}
# Maintain an aggregate count for each unique key encountered in the aggregation period
for match in matches:
key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields])
if key_tuple not in match_aggregation:
match_aggregation[key_tuple] = 1
else:
match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1
for keys, count in match_aggregation.iteritems():
text_table.add_row([key for key in keys] + [count])
text += text_table.draw() + '\n\n'
return unicode(text)
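    # Illustrative example: with 'aggregation' enabled and a hypothetical
    # summary_table_fields: ['host', 'status'], the header row becomes
    # ['host', 'status', 'count'] and one row is emitted per unique
    # (host, status) pair found across the aggregated matches.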
def create_default_title(self, matches):
return self.rule['name']
def get_account(self, account_file):
""" Gets the username and password from an account file.
:param account_file: Name of the file which contains user and password information.
"""
account_conf = yaml_loader(account_file)
if 'user' not in account_conf or 'password' not in account_conf:
raise EAException('Account file must have user and password fields')
self.user = account_conf['user']
self.password = account_conf['password']
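# Minimal sketch of a custom alerter (illustrative only; not shipped with this module):
# a subclass only needs to implement alert(), and should override get_info() so the
# alert metadata reports its type.
#
# class PrintAlerter(Alerter):
#     def alert(self, matches):
#         print self.create_alert_body(matches)
#     def get_info(self):
#         return {'type': 'print'}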
class StompAlerter(Alerter):
""" The stomp alerter publishes alerts via stomp to a broker. """
required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password'])
def alert(self, matches):
alerts = []
qk = self.rule.get('query_key', None)
fullmessage = {}
for match in matches:
if qk in match:
elastalert_logger.info(
'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
alerts.append('1)Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
fullmessage['match'] = match[qk]
else:
elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
alerts.append(
'2)Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))
)
fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field'])
elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))
fullmessage['alerts'] = alerts
fullmessage['rule'] = self.rule['name']
fullmessage['matching'] = unicode(BasicMatchString(self.rule, match))
fullmessage['alertDate'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
fullmessage['body'] = self.create_alert_body(matches)
self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost')
self.stomp_hostport = self.rule.get('stomp_hostport', '61613')
self.stomp_login = self.rule.get('stomp_login', 'admin')
self.stomp_password = self.rule.get('stomp_password', 'admin')
self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT')
conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)])
conn.start()
conn.connect(self.stomp_login, self.stomp_password)
conn.send(self.stomp_destination, json.dumps(fullmessage))
conn.disconnect()
def get_info(self):
return {'type': 'stomp'}
class DebugAlerter(Alerter):
""" The debug alerter uses a Python logger (by default, alerting to terminal). """
def alert(self, matches):
qk = self.rule.get('query_key', None)
for match in matches:
if qk in match:
elastalert_logger.info(
'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
else:
elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))
def get_info(self):
return {'type': 'debug'}
class EmailAlerter(Alerter):
""" Sends an email alert """
required_options = frozenset(['email'])
def __init__(self, *args):
super(EmailAlerter, self).__init__(*args)
self.smtp_host = self.rule.get('smtp_host', 'localhost')
self.smtp_ssl = self.rule.get('smtp_ssl', False)
self.from_addr = self.rule.get('from_addr', 'ElastAlert')
self.smtp_port = self.rule.get('smtp_port')
self.user = self.rule.get('user')
self.password = self.rule.get('password')
# Convert email to a list if it isn't already
if isinstance(self.rule['email'], basestring):
self.rule['email'] = [self.rule['email']]
# If there is a cc then also convert it a list if it isn't
cc = self.rule.get('cc')
if cc and isinstance(cc, basestring):
self.rule['cc'] = [self.rule['cc']]
# If there is a bcc then also convert it to a list if it isn't
bcc = self.rule.get('bcc')
if bcc and isinstance(bcc, basestring):
self.rule['bcc'] = [self.rule['bcc']]
add_suffix = self.rule.get('email_add_domain')
if add_suffix and not add_suffix.startswith('@'):
self.rule['email_add_domain'] = '@' + add_suffix
def alert(self, matches):
body = self.create_alert_body(matches)
# Add JIRA ticket if it exists
if self.pipeline is not None and 'jira_ticket' in self.pipeline:
url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket'])
body += '\nJIRA ticket: %s' % (url)
to_addr = self.rule['email']
if 'email_from_field' in self.rule:
recipient = lookup_es_key(matches[0], self.rule['email_from_field'])
if isinstance(recipient, basestring):
if '@' in recipient:
to_addr = [recipient]
elif 'email_add_domain' in self.rule:
to_addr = [recipient + self.rule['email_add_domain']]
email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8')
email_msg['Subject'] = self.create_title(matches)
email_msg['To'] = ', '.join(to_addr)
email_msg['From'] = self.from_addr
email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])
email_msg['Date'] = formatdate()
if self.rule.get('cc'):
email_msg['CC'] = ','.join(self.rule['cc'])
to_addr = to_addr + self.rule['cc']
if self.rule.get('bcc'):
to_addr = to_addr + self.rule['bcc']
try:
if self.smtp_ssl:
if self.smtp_port:
self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port)
else:
self.smtp = SMTP_SSL(self.smtp_host)
else:
if self.smtp_port:
self.smtp = SMTP(self.smtp_host, self.smtp_port)
else:
self.smtp = SMTP(self.smtp_host)
self.smtp.ehlo()
if self.smtp.has_extn('STARTTLS'):
self.smtp.starttls()
            # authenticate with the SMTP server
self.smtp.login(self.user, self.password)
except (SMTPException, error) as e:
raise EAException("Error connecting to SMTP host: %s" % (e))
except SMTPAuthenticationError as e:
raise EAException("SMTP username/password rejected: %s" % (e))
self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string())
self.smtp.close()
elastalert_logger.info("Sent email to %s" % (to_addr))
def create_default_title(self, matches):
subject = 'ElastAlert: %s' % (self.rule['name'])
# If the rule has a query_key, add that value plus timestamp to subject
if 'query_key' in self.rule:
qk = matches[0].get(self.rule['query_key'])
if qk:
subject += ' - %s' % (qk)
return subject
def get_info(self):
return {'type': 'email',
'recipients': self.rule['email']}
class JiraAlerter(Alerter):
""" Creates a Jira ticket for each alert """
required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype'])
# Maintain a static set of built-in fields that we explicitly know how to set
# For anything else, we will do best-effort and try to set a string value
known_field_list = [
'jira_account_file',
'jira_assignee',
'jira_bump_in_statuses',
'jira_bump_not_in_statuses',
'jira_bump_tickets',
'jira_component',
'jira_components',
'jira_description',
'jira_ignore_in_title',
'jira_issuetype',
'jira_label',
'jira_labels',
'jira_max_age',
'jira_priority',
'jira_project',
'jira_server',
'jira_watchers',
]
# Some built-in jira types that can be used as custom fields require special handling
# Here is a sample of one of them:
# {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,"navigable":true,"searchable":true,
# "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string",
# "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}}
# There are likely others that will need to be updated on a case-by-case basis
custom_string_types_with_special_handling = [
'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes',
'com.atlassian.jira.plugin.system.customfieldtypes:multiselect',
'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons',
]
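    # Hypothetical example: a rule option such as
    #   jira_my_custom_field: some_value
    # is not in known_field_list, so get_arbitrary_fields() below normalizes it to
    # "my custom field", looks it up in the server's field metadata, and sets it on
    # the created issue as a string, number, option object, or array, as the schema dictates.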
def __init__(self, rule):
super(JiraAlerter, self).__init__(rule)
self.server = self.rule['jira_server']
self.get_account(self.rule['jira_account_file'])
self.project = self.rule['jira_project']
self.issue_type = self.rule['jira_issuetype']
# We used to support only a single component. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.components = self.rule.get('jira_components', self.rule.get('jira_component'))
# We used to support only a single label. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.labels = self.rule.get('jira_labels', self.rule.get('jira_label'))
self.description = self.rule.get('jira_description', '')
self.assignee = self.rule.get('jira_assignee')
self.max_age = self.rule.get('jira_max_age', 30)
self.priority = self.rule.get('jira_priority')
self.bump_tickets = self.rule.get('jira_bump_tickets', False)
self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses')
self.bump_in_statuses = self.rule.get('jira_bump_in_statuses')
self.watchers = self.rule.get('jira_watchers')
if self.bump_in_statuses and self.bump_not_in_statuses:
msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' % \
(','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses))
            intersection = list(set(self.bump_in_statuses) & set(self.bump_not_in_statuses))
if intersection:
msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % (
msg, ','.join(intersection))
msg += ' This should be simplified to use only one or the other.'
logging.warning(msg)
self.jira_args = {'project': {'key': self.project},
'issuetype': {'name': self.issue_type}}
if self.components:
# Support single component or list
if type(self.components) != list:
self.jira_args['components'] = [{'name': self.components}]
else:
self.jira_args['components'] = [{'name': component} for component in self.components]
if self.labels:
# Support single label or list
if type(self.labels) != list:
self.labels = [self.labels]
self.jira_args['labels'] = self.labels
if self.watchers:
# Support single watcher or list
if type(self.watchers) != list:
self.watchers = [self.watchers]
if self.assignee:
self.jira_args['assignee'] = {'name': self.assignee}
try:
self.client = JIRA(self.server, basic_auth=(self.user, self.password))
self.get_priorities()
self.get_arbitrary_fields()
except JIRAError as e:
# JIRAError may contain HTML, pass along only first 1024 chars
raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024]))
try:
if self.priority is not None:
self.jira_args['priority'] = {'id': self.priority_ids[self.priority]}
except KeyError:
logging.error("Priority %s not found. Valid priorities are %s" % (self.priority, self.priority_ids.keys()))
def get_arbitrary_fields(self):
# This API returns metadata about all the fields defined on the jira server (built-ins and custom ones)
fields = self.client.fields()
for jira_field, value in self.rule.iteritems():
# If we find a field that is not covered by the set that we are aware of, it means it is either:
# 1. A built-in supported field in JIRA that we don't have on our radar
# 2. A custom field that a JIRA admin has configured
if jira_field.startswith('jira_') and jira_field not in self.known_field_list:
# Remove the jira_ part. Convert underscores to spaces
normalized_jira_field = jira_field[5:].replace('_', ' ').lower()
# All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case
for identifier in ['name', 'id']:
field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None)
if field:
break
if not field:
# Log a warning to ElastAlert saying that we couldn't find that type?
# OR raise and fail to load the alert entirely? Probably the latter...
raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field))
arg_name = field['id']
# Check the schema information to decide how to set the value correctly
# If the schema information is not available, raise an exception since we don't know how to set it
# Note this is only the case for two built-in types, id: issuekey and id: thumbnail
                if 'schema' not in field or 'type' not in field['schema']:
raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field))
arg_type = field['schema']['type']
# Handle arrays of simple types like strings or numbers
if arg_type == 'array':
# As a convenience, support the scenario wherein the user only provides
# a single value for a multi-value field e.g. jira_labels: Only_One_Label
if type(value) != list:
value = [value]
array_items = field['schema']['items']
# Simple string types
if array_items in ['string', 'date', 'datetime']:
# Special case for multi-select custom types (the JIRA metadata says that these are strings, but
                        # in reality, they are required to be provided as an object.)
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
self.jira_args[arg_name] = value
elif array_items == 'number':
self.jira_args[arg_name] = [int(v) for v in value]
# Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key'
elif array_items == 'option':
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
# Try setting it as an object, using 'name' as the key
# This may not work, as the key might actually be 'key', 'id', 'value', or something else
# If it works, great! If not, it will manifest itself as an API error that will bubble up
self.jira_args[arg_name] = [{'name': v} for v in value]
# Handle non-array types
else:
# Simple string types
if arg_type in ['string', 'date', 'datetime']:
# Special case for custom types (the JIRA metadata says that these are strings, but
                        # in reality, they are required to be provided as an object.)
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = {'value': value}
else:
self.jira_args[arg_name] = value
# Number type
elif arg_type == 'number':
self.jira_args[arg_name] = int(value)
elif arg_type == 'option':
self.jira_args[arg_name] = {'value': value}
# Complex type
else:
self.jira_args[arg_name] = {'name': value}
def get_priorities(self):
""" Creates a mapping of priority index to id. """
priorities = self.client.priorities()
self.priority_ids = {}
for x in range(len(priorities)):
self.priority_ids[x] = priorities[x].id
def set_assignee(self, assignee):
self.assignee = assignee
if assignee:
self.jira_args['assignee'] = {'name': assignee}
elif 'assignee' in self.jira_args:
self.jira_args.pop('assignee')
def find_existing_ticket(self, matches):
# Default title, get stripped search version
if 'alert_subject' not in self.rule:
title = self.create_default_title(matches, True)
else:
title = self.create_title(matches)
if 'jira_ignore_in_title' in self.rule:
title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '')
# This is necessary for search to work. Other special characters and dashes
# directly adjacent to words appear to be ok
title = title.replace(' - ', ' ')
title = title.replace('\\', '\\\\')
date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d')
jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date)
if self.bump_in_statuses:
jql = '%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses))
if self.bump_not_in_statuses:
jql = '%s and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses))
try:
issues = self.client.search_issues(jql)
except JIRAError as e:
logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e))
return None
if len(issues):
return issues[0]
def comment_on_ticket(self, ticket, match):
text = unicode(JiraFormattedMatchString(self.rule, match))
timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field']))
comment = "This alert was triggered again at %s\n%s" % (timestamp, text)
self.client.add_comment(ticket, comment)
def alert(self, matches):
elastalert_logger.info("Starting up method:---alerts.start---")
title = self.create_title(matches)
if self.bump_tickets:
ticket = self.find_existing_ticket(matches)
if ticket:
elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key))
for match in matches:
try:
self.comment_on_ticket(ticket, match)
except JIRAError as e:
logging.exception("Error while commenting on ticket %s: %s" % (ticket, e))
if self.pipeline is not None:
self.pipeline['jira_ticket'] = ticket
self.pipeline['jira_server'] = self.server
return None
self.jira_args['summary'] = title
self.jira_args['description'] = self.create_alert_body(matches)
try:
self.issue = self.client.create_issue(**self.jira_args)
# You can not add watchers on initial creation. Only as a follow-up action
if self.watchers:
for watcher in self.watchers:
try:
self.client.add_watcher(self.issue.key, watcher)
except Exception as ex:
# Re-raise the exception, preserve the stack-trace, and give some
# context as to which watcher failed to be added
raise Exception("Exception encountered when trying to add '{0}' as a watcher. Does the user exist?\n{1}" .format(watcher, ex)), None, sys.exc_info()[2]
except JIRAError as e:
raise EAException("Error creating JIRA ticket: %s" % (e))
elastalert_logger.info("Opened Jira ticket: %s" % (self.issue))
if self.pipeline is not None:
self.pipeline['jira_ticket'] = self.issue
self.pipeline['jira_server'] = self.server
def create_alert_body(self, matches):
body = self.description + '\n'
body += self.get_aggregation_summary_text(matches)
for match in matches:
body += unicode(JiraFormattedMatchString(self.rule, match))
if len(matches) > 1:
body += '\n----------------------------------------\n'
return body
def get_aggregation_summary_text(self, matches):
text = super(JiraAlerter, self).get_aggregation_summary_text(matches)
if text:
text = u'{{noformat}}{0}{{noformat}}'.format(text)
return text
def create_default_title(self, matches, for_search=False):
# If there is a query_key, use that in the title
if 'query_key' in self.rule and self.rule['query_key'] in matches[0]:
title = 'ElastAlert: %s matched %s' % (matches[0][self.rule['query_key']], self.rule['name'])
else:
title = 'ElastAlert: %s' % (self.rule['name'])
if for_search:
return title
title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time')))
# Add count for spikes
count = matches[0].get('spike_count')
if count:
title += ' - %s+ events' % (count)
return title
def get_info(self):
return {'type': 'jira'}
class CommandAlerter(Alerter):
required_options = set(['command'])
def __init__(self, *args):
super(CommandAlerter, self).__init__(*args)
self.last_command = []
self.shell = False
if isinstance(self.rule['command'], basestring):
self.shell = True
if '%' in self.rule['command']:
logging.warning('Warning! You could be vulnerable to shell injection!')
self.rule['command'] = [self.rule['command']]
self.new_style_string_format = False
if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']:
self.new_style_string_format = True
def alert(self, matches):
elastalert_logger.info("Starting up method:---alerts.command.alert---")
# Format the command and arguments
try:
if self.new_style_string_format:
command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']]
else:
command = [command_arg % matches[0] for command_arg in self.rule['command']]
self.last_command = command
except KeyError as e:
raise EAException("Error formatting command: %s" % (e))
# Run command and pipe data
try:
subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell)
if self.rule.get('pipe_match_json'):
match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n'
stdout, stderr = subp.communicate(input=match_json)
if self.rule.get("fail_on_non_zero_exit", False) and subp.wait():
raise EAException("Non-zero exit code while running command %s" % (' '.join(command)))
except OSError as e:
raise EAException("Error while running command %s: %s" % (' '.join(command), e))
def get_info(self):
return {'type': 'command',
'command': ' '.join(self.last_command)}
class SnsAlerter(Alerter):
"""send alert using AWS SNS service"""
required_options = frozenset(['sns_topic_arn'])
def __init__(self, *args):
super(SnsAlerter, self).__init__(*args)
self.sns_topic_arn = self.rule.get('sns_topic_arn', '')
self.aws_access_key = self.rule.get('aws_access_key', '')
self.aws_secret_key = self.rule.get('aws_secret_key', '')
self.aws_region = self.rule.get('aws_region', 'us-east-1')
self.boto_profile = self.rule.get('boto_profile', '')
def create_default_title(self, matches):
subject = 'ElastAlert: %s' % (self.rule['name'])
return subject
def alert(self, matches):
body = self.create_alert_body(matches)
# use aws_access_key and aws_secret_key if specified; then use boto profile if specified;
# otherwise use instance role
if not self.aws_access_key and not self.aws_secret_key:
if not self.boto_profile:
sns_client = sns.connect_to_region(self.aws_region)
else:
sns_client = sns.connect_to_region(self.aws_region,
profile_name=self.boto_profile)
else:
sns_client = sns.connect_to_region(self.aws_region,
aws_access_key_id=self.aws_access_key,
aws_secret_access_key=self.aws_secret_key)
sns_client.publish(self.sns_topic_arn, body, subject=self.create_title(matches))
elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn))
class HipChatAlerter(Alerter):
""" Creates a HipChat room notification for each alert """
required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id'])
def __init__(self, rule):
super(HipChatAlerter, self).__init__(rule)
self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red')
self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html')
self.hipchat_auth_token = self.rule['hipchat_auth_token']
self.hipchat_room_id = self.rule['hipchat_room_id']
self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com')
self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False)
self.hipchat_notify = self.rule.get('hipchat_notify', True)
self.hipchat_from = self.rule.get('hipchat_from', '')
self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % (
self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token)
self.hipchat_proxy = self.rule.get('hipchat_proxy', None)
def alert(self, matches):
body = self.create_alert_body(matches)
# HipChat sends 400 bad request on messages longer than 10000 characters
if (len(body) > 9999):
body = body[:9980] + '..(truncated)'
# Use appropriate line ending for text/html
if self.hipchat_message_format == 'html':
body = body.replace('\n', '<br />')
# Post to HipChat
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None
payload = {
'color': self.hipchat_msg_color,
'message': body,
'message_format': self.hipchat_message_format,
'notify': self.hipchat_notify,
'from': self.hipchat_from
}
try:
if self.hipchat_ignore_ssl_errors:
requests.packages.urllib3.disable_warnings()
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers,
verify=not self.hipchat_ignore_ssl_errors,
proxies=proxies)
warnings.resetwarnings()
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to HipChat: %s" % e)
elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id)
def get_info(self):
return {'type': 'hipchat',
'hipchat_room_id': self.hipchat_room_id}
class SlackAlerter(Alerter):
""" Creates a Slack room message for each alert """
required_options = frozenset(['slack_webhook_url'])
def __init__(self, rule):
super(SlackAlerter, self).__init__(rule)
self.slack_webhook_url = self.rule['slack_webhook_url']
if isinstance(self.slack_webhook_url, basestring):
self.slack_webhook_url = [self.slack_webhook_url]
self.slack_proxy = self.rule.get('slack_proxy', None)
self.slack_username_override = self.rule.get('slack_username_override', 'elastalert')
self.slack_channel_override = self.rule.get('slack_channel_override', '')
self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:')
self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '')
self.slack_msg_color = self.rule.get('slack_msg_color', 'danger')
self.slack_parse_override = self.rule.get('slack_parse_override', 'none')
self.slack_text_string = self.rule.get('slack_text_string', '')
def format_body(self, body):
# https://api.slack.com/docs/formatting
body = body.encode('UTF-8')
body = body.replace('&', '&')
body = body.replace('<', '<')
body = body.replace('>', '>')
return body
def alert(self, matches):
body = self.create_alert_body(matches)
body = self.format_body(body)
# post to slack
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.slack_proxy} if self.slack_proxy else None
payload = {
'username': self.slack_username_override,
'channel': self.slack_channel_override,
'parse': self.slack_parse_override,
'text': self.slack_text_string,
'attachments': [
{
'color': self.slack_msg_color,
'title': self.create_title(matches),
'text': body,
'fields': []
}
]
}
if self.slack_icon_url_override != '':
payload['icon_url'] = self.slack_icon_url_override
else:
payload['icon_emoji'] = self.slack_emoji_override
for url in self.slack_webhook_url:
try:
response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to slack: %s" % e)
elastalert_logger.info("Alert sent to Slack")
def get_info(self):
return {'type': 'slack',
'slack_username_override': self.slack_username_override,
'slack_webhook_url': self.slack_webhook_url}
class PagerDutyAlerter(Alerter):
""" Create an incident on PagerDuty for each alert """
required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name'])
def __init__(self, rule):
super(PagerDutyAlerter, self).__init__(rule)
self.pagerduty_service_key = self.rule['pagerduty_service_key']
self.pagerduty_client_name = self.rule['pagerduty_client_name']
self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '')
self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None)
self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'
def alert(self, matches):
body = self.create_alert_body(matches)
# post to pagerduty
headers = {'content-type': 'application/json'}
payload = {
'service_key': self.pagerduty_service_key,
'description': self.rule['name'],
'event_type': 'trigger',
'incident_key': self.pagerduty_incident_key,
'client': self.pagerduty_client_name,
'details': {
"information": body.encode('UTF-8'),
},
}
# set https proxy, if it was provided
proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None
try:
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to pagerduty: %s" % e)
elastalert_logger.info("Trigger sent to PagerDuty")
def get_info(self):
return {'type': 'pagerduty',
'pagerduty_client_name': self.pagerduty_client_name}
class ExotelAlerter(Alerter):
required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number'])
def __init__(self, rule):
super(ExotelAlerter, self).__init__(rule)
self.exotel_account_sid = self.rule['exotel_account_sid']
self.exotel_auth_token = self.rule['exotel_auth_token']
self.exotel_to_number = self.rule['exotel_to_number']
self.exotel_from_number = self.rule['exotel_from_number']
self.sms_body = self.rule.get('exotel_message_body', '')
def alert(self, matches):
client = Exotel(self.exotel_account_sid, self.exotel_auth_token)
try:
message_body = self.rule['name'] + self.sms_body
response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body)
if response != 200:
raise EAException("Error posting to Exotel, response code is %s" % response)
except:
raise EAException("Error posting to Exotel")
elastalert_logger.info("Trigger sent to Exotel")
def get_info(self):
return {'type': 'exotel', 'exotel_account': self.exotel_account_sid}
class TwilioAlerter(Alerter):
required_options = frozenset(['twilio_accout_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number'])
def __init__(self, rule):
super(TwilioAlerter, self).__init__(rule)
self.twilio_accout_sid = self.rule['twilio_accout_sid']
self.twilio_auth_token = self.rule['twilio_auth_token']
self.twilio_to_number = self.rule['twilio_to_number']
self.twilio_from_number = self.rule['twilio_from_number']
def alert(self, matches):
client = TwilioRestClient(self.twilio_accout_sid, self.twilio_auth_token)
try:
client.messages.create(body=self.rule['name'],
to=self.twilio_to_number,
                                   from_=self.twilio_from_number)
except TwilioRestException as e:
raise EAException("Error posting to twilio: %s" % e)
elastalert_logger.info("Trigger sent to Twilio")
def get_info(self):
return {'type': 'twilio',
'twilio_client_name': self.twilio_from_number}
class VictorOpsAlerter(Alerter):
""" Creates a VictorOps Incident for each alert """
required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type'])
def __init__(self, rule):
super(VictorOpsAlerter, self).__init__(rule)
self.victorops_api_key = self.rule['victorops_api_key']
self.victorops_routing_key = self.rule['victorops_routing_key']
self.victorops_message_type = self.rule['victorops_message_type']
self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name')
self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % (
self.victorops_api_key, self.victorops_routing_key)
self.victorops_proxy = self.rule.get('victorops_proxy', None)
def alert(self, matches):
body = self.create_alert_body(matches)
# post to victorops
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None
payload = {
"message_type": self.victorops_message_type,
"entity_display_name": self.victorops_entity_display_name,
"monitoring_tool": "ElastAlert",
"state_message": body
}
try:
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to VictorOps: %s" % e)
elastalert_logger.info("Trigger sent to VictorOps")
def get_info(self):
return {'type': 'victorops',
'victorops_routing_key': self.victorops_routing_key}
class TelegramAlerter(Alerter):
""" Send a Telegram message via bot api for each alert """
required_options = frozenset(['telegram_bot_token', 'telegram_room_id'])
def __init__(self, rule):
super(TelegramAlerter, self).__init__(rule)
self.telegram_bot_token = self.rule['telegram_bot_token']
self.telegram_room_id = self.rule['telegram_room_id']
self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org')
self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage")
self.telegram_proxy = self.rule.get('telegram_proxy', None)
def alert(self, matches):
body = u'⚠ *%s* ⚠ ```\n' % (self.create_title(matches))
for match in matches:
body += unicode(BasicMatchString(self.rule, match))
# Separate text of aggregated alerts with dashes
if len(matches) > 1:
body += '\n----------------------------------------\n'
body += u' ```'
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None
payload = {
'chat_id': self.telegram_room_id,
'text': body,
'parse_mode': 'markdown',
'disable_web_page_preview': True
}
try:
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
warnings.resetwarnings()
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to Telegram: %s" % e)
elastalert_logger.info(
"Alert sent to Telegram room %s" % self.telegram_room_id)
def get_info(self):
return {'type': 'telegram',
'telegram_room_id': self.telegram_room_id}
class GitterAlerter(Alerter):
""" Creates a Gitter activity message for each alert """
required_options = frozenset(['gitter_webhook_url'])
def __init__(self, rule):
super(GitterAlerter, self).__init__(rule)
self.gitter_webhook_url = self.rule['gitter_webhook_url']
self.gitter_proxy = self.rule.get('gitter_proxy', None)
self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error')
def alert(self, matches):
body = self.create_alert_body(matches)
# post to Gitter
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None
payload = {
'message': body,
'level': self.gitter_msg_level
}
try:
response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to Gitter: %s" % e)
elastalert_logger.info("Alert sent to Gitter")
def get_info(self):
return {'type': 'gitter',
'gitter_webhook_url': self.gitter_webhook_url}
class ServiceNowAlerter(Alerter):
""" Creates a ServiceNow alert """
required_options = set(['username', 'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id'])
def __init__(self, rule):
        super(ServiceNowAlerter, self).__init__(rule)
self.servicenow_rest_url = self.rule['servicenow_rest_url']
self.servicenow_proxy = self.rule.get('servicenow_proxy', None)
def alert(self, matches):
for match in matches:
# Parse everything into description.
description = str(BasicMatchString(self.rule, match))
# Set proper headers
headers = {
"Content-Type": "application/json",
"Accept": "application/json;charset=utf-8"
}
proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None
payload = {
"description": description,
"short_description": self.rule['short_description'],
"comments": self.rule['comments'],
"assignment_group": self.rule['assignment_group'],
"category": self.rule['category'],
"subcategory": self.rule['subcategory'],
"cmdb_ci": self.rule['cmdb_ci'],
"caller_id": self.rule["caller_id"]
}
try:
response = requests.post(self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload, cls=DateTimeEncoder), proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to ServiceNow: %s" % e)
elastalert_logger.info("Alert sent to ServiceNow")
def get_info(self):
return {'type': 'ServiceNow',
'self.servicenow_rest_url': self.servicenow_rest_url}
class SimplePostAlerter(Alerter):
def __init__(self, rule):
super(SimplePostAlerter, self).__init__(rule)
simple_webhook_url = self.rule.get('simple_webhook_url')
if isinstance(simple_webhook_url, basestring):
simple_webhook_url = [simple_webhook_url]
self.simple_webhook_url = simple_webhook_url
self.simple_proxy = self.rule.get('simple_proxy')
def alert(self, matches):
payload = {
'rule': self.rule['name'],
'matches': matches
}
headers = {
"Content-Type": "application/json",
"Accept": "application/json;charset=utf-8"
}
proxies = {'https': self.simple_proxy} if self.simple_proxy else None
for url in self.simple_webhook_url:
try:
response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting simple alert: %s" % e)
elastalert_logger.info("Simple alert sent")
def get_info(self):
return {'type': 'simple',
'simple_webhook_url': self.simple_webhook_url}
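# Illustrative usage sketch (not exercised anywhere in this module):
# SimplePostAlerter can be driven directly with a hand-built rule dict; the
# webhook URL below is a placeholder and the match dict is a minimal example.
#
#   if __name__ == '__main__':
#       rule = {
#           'name': 'example-rule',
#           'simple_webhook_url': 'http://localhost:8000/alert',  # placeholder
#       }
#       alerter = SimplePostAlerter(rule)
#       alerter.alert([{'@timestamp': '2018-01-01T00:00:00Z', 'message': 'hi'}])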
| jetyang2005/elastalert | elastalert/alerts.py | Python | apache-2.0 | 55,913 |
# coding=utf-8
# The code below defines helper functions; it is unrelated to the configuration and should not need changes.
def get(ar, index):
    l = len(ar)
    if index < 0:
        return ar[l + index]
    else:
        return ar[index]
def find(ar, filter):
    for r in ar:
        if filter(r):
            return r
    return None
def execute(ar, filter, action):
    for r in ar:
        if filter(r):
            action(r)
unabled=[户型图存储方案,户型图存储,安居客户型列表,安居客评价,安居客楼盘详情,相册存储方案,安居客相册];
for e in unabled:
e.etls[0].Enabled=False
页数范围控制=find(安居客核心流程.etls,lambda x:x.TypeName=='数量范围选择')
#Below are the settings you may need to modify:
###################################################
重试次数='3'
##Number of pages to skip; note this is counted in page turns.
页数范围控制.Skip=0
##Number of pages to fetch; it can be set very large so the crawl runs all the way to the end.
页数范围控制.Take=20000000
debug=False
#Whether to crawl incrementally.
#Note: the system checks the database for existing data, so during debugging nothing may be displayed (all of the data is already in the database).
#If duplicates are acceptable, or you want to observe results while debugging, then:
not_repeat=True
def work2(x):
x.Enabled=not_repeat;
def work(x):
x.MaxTryCount=重试次数;
execute(安居客核心流程.etls,lambda x:x.TypeName=='从爬虫转换',work)
execute(安居客核心流程.etls,lambda x:x.Name=='防重复',work2)
get(安居客核心流程.etls,-2).Enabled=not debug;
#Whether to save the full JSON record to the database.
get(安居客核心流程.etls,-3).Enabled=False
#Whether to save the photo albums. Either way, the album paths are written to the database.
get(安居客相册.etls,-1).Enabled=True
#Whether to save the floor-plan images. Either way, the floor-plan paths are written to the database.
get(户型图存储.etls,-1).Enabled=True
#Cities to crawl, given as a regular expression; to include all cities, set it to ''.
get(安居客城市.etls,-1).Script='锦州|景德镇|吉安|济宁|金华|揭阳|晋中|九江|焦作|晋城|荆州|佳木斯|酒泉|鸡西|济源|金昌|嘉峪关'
#Storage path for floor-plan images.
get(户型图存储方案.etls,-4).Format='D:\安居客图片\{0}\户型图\{1}_{2}_{3}.jpg'
#Storage path for album photos.
get(相册存储方案.etls,-4).Format='D:\安居客图片\{0}\相册\{1}_{2}_{3}.jpg' | ferventdesert/Hawk-Projects | 安居客/安居客.py | Python | apache-2.0 | 2,402 |
# coding=utf-8#
# Copyright (c) 2014-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import hashlib
import json
import logging as std_logging
import os
import urllib
from eventlet import greenthread
from time import strftime
from time import time
from requests import HTTPError
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import importutils
from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip.cluster_manager import \
ClusterManager
from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2 as f5const
from f5_openstack_agent.lbaasv2.drivers.bigip.esd_filehandler import \
EsdTagProcessor
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5ex
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_builder import \
LBaaSBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_driver import \
LBaaSBaseDriver
from f5_openstack_agent.lbaasv2.drivers.bigip import network_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.network_service import \
NetworkServiceBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.service_adapter import \
ServiceModelAdapter
from f5_openstack_agent.lbaasv2.drivers.bigip import ssl_profile
from f5_openstack_agent.lbaasv2.drivers.bigip import stat_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.system_helper import \
SystemHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.tenants import \
BigipTenantManager
from f5_openstack_agent.lbaasv2.drivers.bigip.utils import serialized
from f5_openstack_agent.lbaasv2.drivers.bigip.virtual_address import \
VirtualAddress
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qlbaas-'
__VERSION__ = '0.1.1'
# configuration objects specific to iControl driver
# XXX see /etc/neutron/services/f5/f5-openstack-agent.ini
OPTS = [ # XXX maybe we should make this a dictionary
cfg.StrOpt(
'bigiq_hostname',
help='The hostname (name or IP address) to use for the BIG-IQ host'
),
cfg.StrOpt(
'bigiq_admin_username',
default='admin',
help='The admin username to use for BIG-IQ authentication',
),
cfg.StrOpt(
'bigiq_admin_password',
default='[Provide password in config file]',
secret=True,
help='The admin password to use for BIG-IQ authentication'
),
cfg.StrOpt(
'openstack_keystone_uri',
default='http://192.0.2.248:5000/',
        help='The URI of the Keystone authentication endpoint'
),
cfg.StrOpt(
'openstack_admin_username',
default='admin',
help='The admin username to use for authentication '
'with the Keystone service'
),
cfg.StrOpt(
'openstack_admin_password',
default='[Provide password in config file]',
secret=True,
help='The admin password to use for authentication'
' with the Keystone service'
),
cfg.StrOpt(
'bigip_management_username',
default='admin',
help='The admin username that the BIG-IQ will use to manage '
'discovered BIG-IPs'
),
cfg.StrOpt(
'bigip_management_password',
default='[Provide password in config file]',
secret=True,
help='The admin password that the BIG-IQ will use to manage '
'discovered BIG-IPs'
),
cfg.StrOpt(
'f5_device_type', default='external',
help='What type of device onboarding'
),
cfg.StrOpt(
'f5_ha_type', default='pair',
help='Are we standalone, pair(active/standby), or scalen'
),
cfg.ListOpt(
'f5_external_physical_mappings', default=['default:1.1:True'],
help='Mapping between Neutron physical_network to interfaces'
),
cfg.StrOpt(
'f5_vtep_folder', default='Common',
help='Folder for the VTEP SelfIP'
),
cfg.StrOpt(
'f5_vtep_selfip_name', default=None,
help='Name of the VTEP SelfIP'
),
cfg.ListOpt(
'advertised_tunnel_types', default=['vxlan'],
help='tunnel types which are advertised to other VTEPs'
),
cfg.BoolOpt(
'f5_populate_static_arp', default=False,
help='create static arp entries based on service entries'
),
cfg.StrOpt(
'vlan_binding_driver',
default=None,
help='driver class for binding vlans to device ports'
),
cfg.StrOpt(
'interface_port_static_mappings',
default=None,
        help='JSON encoded static mapping of '
'devices to list of '
'interface and port_id'
),
cfg.StrOpt(
'l3_binding_driver',
default=None,
help='driver class for binding l3 address to l2 ports'
),
cfg.StrOpt(
'l3_binding_static_mappings', default=None,
        help='JSON encoded static mapping of '
'subnet_id to list of '
'port_id, device_id list.'
),
cfg.BoolOpt(
'f5_route_domain_strictness', default=False,
help='Strict route domain isolation'
),
cfg.BoolOpt(
'f5_common_networks', default=False,
help='All networks defined under Common partition'
),
cfg.BoolOpt(
'f5_common_external_networks', default=True,
help='Treat external networks as common'
),
cfg.BoolOpt(
'external_gateway_mode', default=False,
help='All subnets have an external l3 route on gateway'
),
cfg.StrOpt(
'icontrol_vcmp_hostname',
help='The hostname (name or IP address) to use for vCMP Host '
'iControl access'
),
cfg.StrOpt(
'icontrol_hostname',
default="10.190.5.7",
help='The hostname (name or IP address) to use for iControl access'
),
cfg.StrOpt(
'icontrol_username', default='admin',
help='The username to use for iControl access'
),
cfg.StrOpt(
'icontrol_password', default='admin', secret=True,
help='The password to use for iControl access'
),
cfg.IntOpt(
'icontrol_connection_timeout', default=30,
help='How many seconds to timeout a connection to BIG-IP'
),
cfg.IntOpt(
'icontrol_connection_retry_interval', default=10,
help='How many seconds to wait between retry connection attempts'
),
cfg.DictOpt(
'common_network_ids', default={},
help='network uuid to existing Common networks mapping'
),
cfg.StrOpt(
'icontrol_config_mode', default='objects',
help='Whether to use iapp or objects for bigip configuration'
),
cfg.IntOpt(
'max_namespaces_per_tenant', default=1,
help='How many routing tables the BIG-IP will allocate per tenant'
' in order to accommodate overlapping IP subnets'
),
cfg.StrOpt(
'cert_manager',
default=None,
        help='Class name of the certificate manager used for retrieving '
'certificates and keys.'
),
cfg.StrOpt(
'auth_version',
default=None,
help='Keystone authentication version (v2 or v3) for Barbican client.'
),
cfg.StrOpt(
'os_project_id',
default='service',
help='OpenStack project ID.'
),
cfg.StrOpt(
'os_auth_url',
default=None,
help='OpenStack authentication URL.'
),
cfg.StrOpt(
'os_username',
default=None,
help='OpenStack user name for Keystone authentication.'
),
cfg.StrOpt(
'os_user_domain_name',
default=None,
help='OpenStack user domain name for Keystone authentication.'
),
cfg.StrOpt(
'os_project_name',
default=None,
help='OpenStack project name for Keystone authentication.'
),
cfg.StrOpt(
'os_project_domain_name',
default=None,
        help='OpenStack project domain name for Keystone authentication.'
),
cfg.StrOpt(
'os_password',
default=None,
help='OpenStack user password for Keystone authentication.'
),
cfg.StrOpt(
'f5_network_segment_physical_network', default=None,
help='Name of physical network to use for discovery of segment ID'
),
cfg.StrOpt(
'unlegacy_setting_placeholder', default=None,
help='use this setting to separate legacy with hw/etc on agent side'
),
cfg.IntOpt(
'f5_network_segment_polling_interval', default=10,
help='Seconds between periodic scans for disconnected virtual servers'
),
cfg.IntOpt(
'f5_network_segment_gross_timeout', default=300,
help='Seconds to wait for a virtual server to become connected'
),
cfg.StrOpt(
'f5_parent_ssl_profile',
default='clientssl',
help='Parent profile used when creating client SSL profiles '
'for listeners with TERMINATED_HTTPS protocols.'
),
cfg.StrOpt(
'os_tenant_name',
default=None,
help='OpenStack tenant name for Keystone authentication (v2 only).'
),
cfg.BoolOpt(
'trace_service_requests',
default=False,
help='Log service object.'
),
cfg.BoolOpt(
'report_esd_names_in_agent',
default=False,
help='whether or not to add valid esd names during report.'
)
]
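# Illustrative only: the options above are normally supplied through the agent
# configuration file noted in the XXX comment, and are parsed by oslo.config
# once iControlDriver.__init__ calls self.conf.register_opts(OPTS). A minimal
# set of values (placeholders, not recommendations) would look like:
#
#   icontrol_hostname = 192.0.2.10,192.0.2.11
#   icontrol_username = admin
#   icontrol_password = <secret>
#   f5_ha_type = pair
#   advertised_tunnel_types = vxlan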
def is_operational(method):
# Decorator to check we are operational before provisioning.
def wrapper(*args, **kwargs):
instance = args[0]
if instance.operational:
try:
return method(*args, **kwargs)
except IOError as ioe:
LOG.error('IO Error detected: %s' % method.__name__)
LOG.error(str(ioe))
raise ioe
else:
LOG.error('Cannot execute %s. Not operational. Re-initializing.'
% method.__name__)
instance._init_bigips()
return wrapper
class iControlDriver(LBaaSBaseDriver):
"""Control service deployment."""
    # pzhang(NOTE): we only sync/CRUD objects that are in the states below
positive_plugin_const_state = \
tuple([f5const.F5_PENDING_CREATE,
f5const.F5_PENDING_UPDATE])
def __init__(self, conf, registerOpts=True):
# The registerOpts parameter allows a test to
# turn off config option handling so that it can
# set the options manually instead.
super(iControlDriver, self).__init__(conf)
self.conf = conf
if registerOpts:
self.conf.register_opts(OPTS)
self.initialized = False
self.hostnames = None
self.device_type = conf.f5_device_type
self.plugin_rpc = None # overrides base, same value
self.agent_report_state = None # overrides base, same value
self.operational = False # overrides base, same value
self.driver_name = 'f5-lbaasv2-icontrol'
#
# BIG-IP containers
#
        # BIG-IPs which are currently active
self.__bigips = {}
self.__last_connect_attempt = None
# HA and traffic group validation
self.ha_validated = False
self.tg_initialized = False
# traffic groups discovered from BIG-IPs for service placement
self.__traffic_groups = []
# base configurations to report to Neutron agent state reports
self.agent_configurations = {} # overrides base, same value
self.agent_configurations['device_drivers'] = [self.driver_name]
self.agent_configurations['icontrol_endpoints'] = {}
# to store the verified esd names
self.esd_names = []
# service component managers
self.tenant_manager = None
self.cluster_manager = None
self.system_helper = None
self.lbaas_builder = None
self.service_adapter = None
self.vlan_binding = None
self.l3_binding = None
self.cert_manager = None # overrides register_OPTS
# server helpers
self.stat_helper = stat_helper.StatHelper()
self.network_helper = network_helper.NetworkHelper()
# f5-sdk helpers
self.vs_manager = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual)
self.pool_manager = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool)
try:
            # debug logging of service requests received by driver
if self.conf.trace_service_requests:
path = '/var/log/neutron/service/'
if not os.path.exists(path):
os.makedirs(path)
self.file_name = path + strftime("%H%M%S-%m%d%Y") + '.json'
with open(self.file_name, 'w') as fp:
fp.write('[{}] ')
# driver mode settings - GRM vs L2 adjacent
if self.conf.f5_global_routed_mode:
LOG.info('WARNING - f5_global_routed_mode enabled.'
' There will be no L2 or L3 orchestration'
' or tenant isolation provisioned. All vips'
' and pool members must be routable through'
' pre-provisioned SelfIPs.')
self.conf.use_namespaces = False
self.conf.f5_snat_mode = True
self.conf.f5_snat_addresses_per_subnet = 0
self.agent_configurations['tunnel_types'] = []
self.agent_configurations['bridge_mappings'] = {}
else:
self.agent_configurations['tunnel_types'] = \
self.conf.advertised_tunnel_types
for net_id in self.conf.common_network_ids:
LOG.debug('network %s will be mapped to /Common/%s'
% (net_id, self.conf.common_network_ids[net_id]))
self.agent_configurations['common_networks'] = \
self.conf.common_network_ids
LOG.debug('Setting static ARP population to %s'
% self.conf.f5_populate_static_arp)
self.agent_configurations['f5_common_external_networks'] = \
self.conf.f5_common_external_networks
f5const.FDB_POPULATE_STATIC_ARP = \
self.conf.f5_populate_static_arp
# parse the icontrol_hostname setting
self._init_bigip_hostnames()
# instantiate the managers
self._init_bigip_managers()
self.initialized = True
LOG.debug('iControlDriver loaded successfully')
except Exception as exc:
            LOG.error("exception in initializing driver %s" % str(exc))
self._set_agent_status(False)
def connect(self):
        # initialize communications with BIG-IP via iControl
try:
self._init_bigips()
except Exception as exc:
            LOG.error("exception in initializing communications to BIG-IPs %s"
% str(exc))
self._set_agent_status(False)
def get_valid_esd_names(self):
LOG.debug("verified esd names in get_valid_esd_names():")
LOG.debug(self.esd_names)
return self.esd_names
def _init_bigip_managers(self):
if self.conf.vlan_binding_driver:
try:
self.vlan_binding = importutils.import_object(
self.conf.vlan_binding_driver, self.conf, self)
except ImportError:
LOG.error('Failed to import VLAN binding driver: %s'
% self.conf.vlan_binding_driver)
if self.conf.l3_binding_driver:
try:
self.l3_binding = importutils.import_object(
self.conf.l3_binding_driver, self.conf, self)
except ImportError:
LOG.error('Failed to import L3 binding driver: %s'
% self.conf.l3_binding_driver)
else:
LOG.debug('No L3 binding driver configured.'
' No L3 binding will be done.')
if self.conf.cert_manager:
try:
self.cert_manager = importutils.import_object(
self.conf.cert_manager, self.conf)
except ImportError as import_err:
LOG.error('Failed to import CertManager: %s.' %
import_err.message)
raise
except Exception as err:
LOG.error('Failed to initialize CertManager. %s' % err.message)
# re-raise as ImportError to cause agent exit
raise ImportError(err.message)
self.service_adapter = ServiceModelAdapter(self.conf)
self.tenant_manager = BigipTenantManager(self.conf, self)
self.cluster_manager = ClusterManager()
self.system_helper = SystemHelper()
self.lbaas_builder = LBaaSBuilder(self.conf, self)
if self.conf.f5_global_routed_mode:
self.network_builder = None
else:
self.network_builder = NetworkServiceBuilder(
self.conf.f5_global_routed_mode,
self.conf,
self,
self.l3_binding)
def _init_bigip_hostnames(self):
# Validate and parse bigip credentials
if not self.conf.icontrol_hostname:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_hostname',
opt_value='valid hostname or IP address'
)
if not self.conf.icontrol_username:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_username',
opt_value='valid username'
)
if not self.conf.icontrol_password:
raise f5ex.F5InvalidConfigurationOption(
opt_name='icontrol_password',
opt_value='valid password'
)
self.hostnames = self.conf.icontrol_hostname.split(',')
self.hostnames = [item.strip() for item in self.hostnames]
self.hostnames = sorted(self.hostnames)
# initialize per host agent_configurations
for hostname in self.hostnames:
self.__bigips[hostname] = bigip = type('', (), {})()
bigip.hostname = hostname
bigip.status = 'creating'
bigip.status_message = 'creating BIG-IP from iControl hostnames'
bigip.device_interfaces = dict()
self.agent_configurations[
'icontrol_endpoints'][hostname] = {}
self.agent_configurations[
'icontrol_endpoints'][hostname]['failover_state'] = \
'undiscovered'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status'] = 'unknown'
self.agent_configurations[
'icontrol_endpoints'][hostname]['status_message'] = ''
def _init_bigips(self):
# Connect to all BIG-IPs
if self.operational:
LOG.debug('iControl driver reports connection is operational')
return
LOG.debug('initializing communications to BIG-IPs')
try:
# setup logging options
if not self.conf.debug:
requests_log = std_logging.getLogger(
"requests.packages.urllib3")
requests_log.setLevel(std_logging.ERROR)
requests_log.propagate = False
else:
requests_log = std_logging.getLogger(
"requests.packages.urllib3")
requests_log.setLevel(std_logging.DEBUG)
requests_log.propagate = True
self.__last_connect_attempt = datetime.datetime.now()
for hostname in self.hostnames:
# connect to each BIG-IP and set it status
bigip = self._open_bigip(hostname)
if bigip.status == 'connected':
# set the status down until we assure initialized
bigip.status = 'initializing'
bigip.status_message = 'initializing HA viability'
LOG.debug('initializing HA viability %s' % hostname)
device_group_name = None
if not self.ha_validated:
device_group_name = self._validate_ha(bigip)
LOG.debug('HA validated from %s with DSG %s' %
(hostname, device_group_name))
self.ha_validated = True
if not self.tg_initialized:
self._init_traffic_groups(bigip)
LOG.debug('learned traffic groups from %s as %s' %
(hostname, self.__traffic_groups))
self.tg_initialized = True
LOG.debug('initializing bigip %s' % hostname)
self._init_bigip(bigip, hostname, device_group_name)
LOG.debug('initializing agent configurations %s'
% hostname)
self._init_agent_config(bigip)
# Assure basic BIG-IP HA is operational
LOG.debug('validating HA state for %s' % hostname)
bigip.status = 'validating_HA'
bigip.status_message = 'validating the current HA state'
if self._validate_ha_operational(bigip):
LOG.debug('setting status to active for %s' % hostname)
bigip.status = 'active'
bigip.status_message = 'BIG-IP ready for provisioning'
self._post_init()
else:
LOG.debug('setting status to error for %s' % hostname)
bigip.status = 'error'
bigip.status_message = 'BIG-IP is not operational'
self._set_agent_status(False)
else:
LOG.error('error opening BIG-IP %s - %s:%s'
% (hostname, bigip.status, bigip.status_message))
self._set_agent_status(False)
except Exception as exc:
LOG.error('Invalid agent configuration: %s' % exc.message)
raise
self._set_agent_status(force_resync=True)
def _init_errored_bigips(self):
try:
errored_bigips = self.get_errored_bigips_hostnames()
if errored_bigips:
LOG.debug('attempting to recover %s BIG-IPs' %
len(errored_bigips))
for hostname in errored_bigips:
# try to connect and set status
bigip = self._open_bigip(hostname)
if bigip.status == 'connected':
# set the status down until we assure initialized
bigip.status = 'initializing'
bigip.status_message = 'initializing HA viability'
LOG.debug('initializing HA viability %s' % hostname)
LOG.debug('proceeding to initialize %s' % hostname)
device_group_name = None
if not self.ha_validated:
device_group_name = self._validate_ha(bigip)
LOG.debug('HA validated from %s with DSG %s' %
(hostname, device_group_name))
self.ha_validated = True
if not self.tg_initialized:
self._init_traffic_groups(bigip)
                            LOG.debug('known traffic groups initialized'
' from %s as %s' %
(hostname, self.__traffic_groups))
self.tg_initialized = True
LOG.debug('initializing bigip %s' % hostname)
self._init_bigip(bigip, hostname, device_group_name)
LOG.debug('initializing agent configurations %s'
% hostname)
self._init_agent_config(bigip)
# Assure basic BIG-IP HA is operational
LOG.debug('validating HA state for %s' % hostname)
bigip.status = 'validating_HA'
bigip.status_message = \
'validating the current HA state'
if self._validate_ha_operational(bigip):
LOG.debug('setting status to active for %s'
% hostname)
bigip.status = 'active'
bigip.status_message = \
'BIG-IP ready for provisioning'
self._post_init()
self._set_agent_status(True)
else:
LOG.debug('setting status to error for %s'
% hostname)
bigip.status = 'error'
bigip.status_message = 'BIG-IP is not operational'
self._set_agent_status(False)
else:
LOG.debug('there are no BIG-IPs with error status')
except Exception as exc:
LOG.error('Invalid agent configuration: %s' % exc.message)
raise
def _open_bigip(self, hostname):
# Open bigip connection
try:
bigip = self.__bigips[hostname]
if bigip.status not in ['creating', 'error']:
LOG.debug('BIG-IP %s status invalid %s to open a connection'
% (hostname, bigip.status))
return bigip
bigip.status = 'connecting'
bigip.status_message = 'requesting iControl endpoint'
LOG.info('opening iControl connection to %s @ %s' %
(self.conf.icontrol_username, hostname))
bigip = ManagementRoot(hostname,
self.conf.icontrol_username,
self.conf.icontrol_password,
timeout=f5const.DEVICE_CONNECTION_TIMEOUT,
debug=self.conf.debug)
bigip.status = 'connected'
bigip.status_message = 'connected to BIG-IP'
self.__bigips[hostname] = bigip
return bigip
except Exception as exc:
LOG.error('could not communicate with ' +
'iControl device: %s' % hostname)
# since no bigip object was created, create a dummy object
# so we can store the status and status_message attributes
errbigip = type('', (), {})()
errbigip.hostname = hostname
errbigip.status = 'error'
errbigip.status_message = str(exc)[:80]
self.__bigips[hostname] = errbigip
return errbigip
def _init_bigip(self, bigip, hostname, check_group_name=None):
# Prepare a bigip for usage
try:
major_version, minor_version = self._validate_bigip_version(
bigip, hostname)
device_group_name = None
extramb = self.system_helper.get_provision_extramb(bigip)
if int(extramb) < f5const.MIN_EXTRA_MB:
raise f5ex.ProvisioningExtraMBValidateFailed(
'Device %s BIG-IP not provisioned for '
'management LARGE.' % hostname)
if self.conf.f5_ha_type == 'pair' and \
self.cluster_manager.get_sync_status(bigip) == \
'Standalone':
raise f5ex.BigIPClusterInvalidHA(
'HA mode is pair and bigip %s in standalone mode'
% hostname)
if self.conf.f5_ha_type == 'scalen' and \
self.cluster_manager.get_sync_status(bigip) == \
'Standalone':
raise f5ex.BigIPClusterInvalidHA(
'HA mode is scalen and bigip %s in standalone mode'
% hostname)
if self.conf.f5_ha_type != 'standalone':
device_group_name = \
self.cluster_manager.get_device_group(bigip)
if not device_group_name:
raise f5ex.BigIPClusterInvalidHA(
'HA mode is %s and no sync failover '
'device group found for device %s.'
% (self.conf.f5_ha_type, hostname))
if check_group_name and device_group_name != check_group_name:
raise f5ex.BigIPClusterInvalidHA(
'Invalid HA. Device %s is in device group'
' %s but should be in %s.'
% (hostname, device_group_name, check_group_name))
bigip.device_group_name = device_group_name
if self.network_builder:
for network in self.conf.common_network_ids.values():
if not self.network_builder.vlan_exists(bigip,
network,
folder='Common'):
raise f5ex.MissingNetwork(
'Common network %s on %s does not exist'
% (network, bigip.hostname))
bigip.device_name = self.cluster_manager.get_device_name(bigip)
bigip.mac_addresses = self.system_helper.get_mac_addresses(bigip)
LOG.debug("Initialized BIG-IP %s with MAC addresses %s" %
(bigip.device_name, ', '.join(bigip.mac_addresses)))
bigip.device_interfaces = \
self.system_helper.get_interface_macaddresses_dict(bigip)
bigip.assured_networks = {}
bigip.assured_tenant_snat_subnets = {}
bigip.assured_gateway_subnets = []
if self.conf.f5_ha_type != 'standalone':
self.cluster_manager.disable_auto_sync(
device_group_name, bigip)
# validate VTEP SelfIPs
if not self.conf.f5_global_routed_mode:
self.network_builder.initialize_tunneling(bigip)
# Turn off tunnel syncing between BIG-IP
# as our VTEPs properly use only local SelfIPs
if self.system_helper.get_tunnel_sync(bigip) == 'enable':
self.system_helper.set_tunnel_sync(bigip, enabled=False)
LOG.debug('connected to iControl %s @ %s ver %s.%s'
% (self.conf.icontrol_username, hostname,
major_version, minor_version))
except Exception as exc:
bigip.status = 'error'
bigip.status_message = str(exc)[:80]
raise
return bigip
def _post_init(self):
# After we have a connection to the BIG-IPs, initialize vCMP
# on all connected BIG-IPs
if self.network_builder:
self.network_builder.initialize_vcmp()
self.agent_configurations['network_segment_physical_network'] = \
self.conf.f5_network_segment_physical_network
LOG.info('iControlDriver initialized to %d bigips with username:%s'
% (len(self.get_active_bigips()),
self.conf.icontrol_username))
LOG.info('iControlDriver dynamic agent configurations:%s'
% self.agent_configurations)
if self.vlan_binding:
LOG.debug(
'getting BIG-IP device interface for VLAN Binding')
self.vlan_binding.register_bigip_interfaces()
if self.l3_binding:
LOG.debug('getting BIG-IP MAC Address for L3 Binding')
self.l3_binding.register_bigip_mac_addresses()
# endpoints = self.agent_configurations['icontrol_endpoints']
# for ic_host in endpoints.keys():
for hostbigip in self.get_all_bigips():
# hostbigip = self.__bigips[ic_host]
mac_addrs = [mac_addr for interface, mac_addr in
hostbigip.device_interfaces.items()
if interface != "mgmt"]
ports = self.plugin_rpc.get_ports_for_mac_addresses(
mac_addresses=mac_addrs)
if ports:
self.agent_configurations['nova_managed'] = True
else:
self.agent_configurations['nova_managed'] = False
if self.network_builder:
self.network_builder.post_init()
# read enhanced services definitions
esd_dir = os.path.join(self.get_config_dir(), 'esd')
esd = EsdTagProcessor(esd_dir)
try:
esd.process_esd(self.get_all_bigips())
self.lbaas_builder.init_esd(esd)
self.service_adapter.init_esd(esd)
LOG.debug('esd details here after process_esd(): ')
LOG.debug(esd)
self.esd_names = esd.esd_dict.keys() or []
            LOG.debug('##### self.esd_names obtained here:')
LOG.debug(self.esd_names)
except f5ex.esdJSONFileInvalidException as err:
LOG.error("unable to initialize ESD. Error: %s.", err.message)
self._set_agent_status(False)
def _validate_ha(self, bigip):
# if there was only one address supplied and
# this is not a standalone device, get the
# devices trusted by this device.
device_group_name = None
if self.conf.f5_ha_type == 'standalone':
if len(self.hostnames) != 1:
bigip.status = 'error'
bigip.status_message = \
'HA mode is standalone and %d hosts found.'\
% len(self.hostnames)
raise f5ex.BigIPClusterInvalidHA(
'HA mode is standalone and %d hosts found.'
% len(self.hostnames))
device_group_name = 'standalone'
elif self.conf.f5_ha_type == 'pair':
device_group_name = self.cluster_manager.\
get_device_group(bigip)
if len(self.hostnames) != 2:
mgmt_addrs = []
devices = self.cluster_manager.devices(bigip)
for device in devices:
mgmt_addrs.append(
self.cluster_manager.get_mgmt_addr_by_device(
bigip, device))
self.hostnames = mgmt_addrs
if len(self.hostnames) != 2:
bigip.status = 'error'
bigip.status_message = 'HA mode is pair and %d hosts found.' \
% len(self.hostnames)
raise f5ex.BigIPClusterInvalidHA(
'HA mode is pair and %d hosts found.'
% len(self.hostnames))
elif self.conf.f5_ha_type == 'scalen':
device_group_name = self.cluster_manager.\
get_device_group(bigip)
if len(self.hostnames) < 2:
mgmt_addrs = []
devices = self.cluster_manager.devices(bigip)
for device in devices:
mgmt_addrs.append(
self.cluster_manager.get_mgmt_addr_by_device(
bigip, device)
)
self.hostnames = mgmt_addrs
if len(self.hostnames) < 2:
bigip.status = 'error'
                bigip.status_message = \
                    'HA mode is scalen and fewer than 2 hosts found.'
                raise f5ex.BigIPClusterInvalidHA(
                    'HA mode is scalen and fewer than 2 hosts found.')
return device_group_name
def _validate_ha_operational(self, bigip):
if self.conf.f5_ha_type == 'standalone':
return True
else:
# how many active BIG-IPs are there?
active_bigips = self.get_active_bigips()
if active_bigips:
sync_status = self.cluster_manager.get_sync_status(bigip)
if sync_status in ['Disconnected', 'Sync Failure']:
if len(active_bigips) > 1:
# the device should not be in the disconnected state
return False
if len(active_bigips) > 1:
# it should be in the same sync-failover group
# as the rest of the active bigips
device_group_name = \
self.cluster_manager.get_device_group(bigip)
for active_bigip in active_bigips:
adgn = self.cluster_manager.get_device_group(
active_bigip)
if not adgn == device_group_name:
return False
return True
else:
return True
def _init_agent_config(self, bigip):
# Init agent config
ic_host = {}
ic_host['version'] = self.system_helper.get_version(bigip)
ic_host['device_name'] = bigip.device_name
ic_host['platform'] = self.system_helper.get_platform(bigip)
ic_host['serial_number'] = self.system_helper.get_serial_number(bigip)
ic_host['status'] = bigip.status
ic_host['status_message'] = bigip.status_message
ic_host['failover_state'] = self.get_failover_state(bigip)
if hasattr(bigip, 'local_ip') and bigip.local_ip:
ic_host['local_ip'] = bigip.local_ip
else:
ic_host['local_ip'] = 'VTEP disabled'
self.agent_configurations['tunnel_types'] = list()
self.agent_configurations['icontrol_endpoints'][bigip.hostname] = \
ic_host
if self.network_builder:
self.agent_configurations['bridge_mappings'] = \
self.network_builder.interface_mapping
def _set_agent_status(self, force_resync=False):
for hostname in self.__bigips:
bigip = self.__bigips[hostname]
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'status'] = bigip.status
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'status_message'] = bigip.status_message
if self.conf.report_esd_names_in_agent:
LOG.debug('adding names to report:')
self.agent_configurations['esd_name'] = \
self.get_valid_esd_names()
# Policy - if any BIG-IP are active we're operational
if self.get_active_bigips():
self.operational = True
else:
self.operational = False
if self.agent_report_state:
self.agent_report_state(force_resync=force_resync)
def get_failover_state(self, bigip):
try:
if hasattr(bigip, 'tm'):
fs = bigip.tm.sys.dbs.db.load(name='failover.state')
bigip.failover_state = fs.value
return bigip.failover_state
else:
return 'error'
except Exception as exc:
LOG.exception('Error getting %s failover state' % bigip.hostname)
bigip.status = 'error'
bigip.status_message = str(exc)[:80]
self._set_agent_status(False)
return 'error'
def get_agent_configurations(self):
for hostname in self.__bigips:
bigip = self.__bigips[hostname]
if bigip.status == 'active':
failover_state = self.get_failover_state(bigip)
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'failover_state'] = failover_state
else:
self.agent_configurations[
'icontrol_endpoints'][bigip.hostname][
'failover_state'] = 'unknown'
self.agent_configurations['icontrol_endpoints'][
bigip.hostname]['status'] = bigip.status
self.agent_configurations['icontrol_endpoints'][
bigip.hostname]['status_message'] = bigip.status_message
self.agent_configurations['operational'] = \
self.operational
LOG.debug('agent configurations are: %s' % self.agent_configurations)
return dict(self.agent_configurations)
def recover_errored_devices(self):
# trigger a retry on errored BIG-IPs
try:
self._init_errored_bigips()
except Exception as exc:
LOG.error('Could not recover devices: %s' % exc.message)
def backend_integrity(self):
if self.operational:
return True
return False
def generate_capacity_score(self, capacity_policy=None):
"""Generate the capacity score of connected devices."""
if capacity_policy:
highest_metric = 0.0
highest_metric_name = None
my_methods = dir(self)
bigips = self.get_all_bigips()
for metric in capacity_policy:
func_name = 'get_' + metric
if func_name in my_methods:
max_capacity = int(capacity_policy[metric])
metric_func = getattr(self, func_name)
metric_value = 0
for bigip in bigips:
if bigip.status == 'active':
global_stats = \
self.stat_helper.get_global_statistics(bigip)
value = int(
metric_func(bigip=bigip,
global_statistics=global_stats)
)
LOG.debug('calling capacity %s on %s returned: %s'
% (func_name, bigip.hostname, value))
else:
value = 0
if value > metric_value:
metric_value = value
metric_capacity = float(metric_value) / float(max_capacity)
if metric_capacity > highest_metric:
highest_metric = metric_capacity
highest_metric_name = metric
else:
LOG.warn('capacity policy has method '
'%s which is not implemented in this driver'
% metric)
LOG.debug('capacity score: %s based on %s'
% (highest_metric, highest_metric_name))
return highest_metric
return 0
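    # Illustrative capacity_policy input for generate_capacity_score() above,
    # with hypothetical metric names and placeholder limits: each key must
    # correspond to a get_<metric> method implemented on this driver, and the
    # score is the highest utilization fraction (metric value / configured
    # maximum) observed across active BIG-IPs.
    #
    #   capacity_policy = {'throughput': '10000000',
    #                      'connection_count': '500000'}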
def set_context(self, context):
# Context to keep for database access
if self.network_builder:
self.network_builder.set_context(context)
def set_plugin_rpc(self, plugin_rpc):
# Provide Plugin RPC access
self.plugin_rpc = plugin_rpc
def set_tunnel_rpc(self, tunnel_rpc):
# Provide FDB Connector with ML2 RPC access
if self.network_builder:
self.network_builder.set_tunnel_rpc(tunnel_rpc)
def set_l2pop_rpc(self, l2pop_rpc):
# Provide FDB Connector with ML2 RPC access
if self.network_builder:
self.network_builder.set_l2pop_rpc(l2pop_rpc)
def set_agent_report_state(self, report_state_callback):
"""Set Agent Report State."""
self.agent_report_state = report_state_callback
def service_exists(self, service):
return self._service_exists(service)
def flush_cache(self):
# Remove cached objects so they can be created if necessary
for bigip in self.get_all_bigips():
bigip.assured_networks = {}
bigip.assured_tenant_snat_subnets = {}
bigip.assured_gateway_subnets = []
@serialized('get_all_deployed_loadbalancers')
@is_operational
def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False):
LOG.debug('getting all deployed loadbalancers on BIG-IPs')
deployed_lb_dict = {}
for bigip in self.get_all_bigips():
folders = self.system_helper.get_folders(bigip)
for folder in folders:
tenant_id = folder[len(self.service_adapter.prefix):]
if str(folder).startswith(self.service_adapter.prefix):
resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual_address)
deployed_lbs = resource.get_resources(bigip, folder)
if deployed_lbs:
for lb in deployed_lbs:
lb_id = lb.name[len(self.service_adapter.prefix):]
if lb_id in deployed_lb_dict:
deployed_lb_dict[lb_id][
'hostnames'].append(bigip.hostname)
else:
deployed_lb_dict[lb_id] = {
'id': lb_id,
'tenant_id': tenant_id,
'hostnames': [bigip.hostname]
}
else:
# delay to assure we are not in the tenant creation
# process before a virtual address is created.
greenthread.sleep(10)
deployed_lbs = resource.get_resources(bigip, folder)
if deployed_lbs:
for lb in deployed_lbs:
lb_id = lb.name[
len(self.service_adapter.prefix):]
deployed_lb_dict[lb_id] = \
{'id': lb_id, 'tenant_id': tenant_id}
else:
# Orphaned folder!
if purge_orphaned_folders:
try:
self.system_helper.purge_folder_contents(
bigip, folder)
self.system_helper.purge_folder(
bigip, folder)
LOG.error('orphaned folder %s on %s' %
(folder, bigip.hostname))
except Exception as exc:
LOG.error('error purging folder %s: %s' %
(folder, str(exc)))
return deployed_lb_dict
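    # Shape of the mapping returned above (illustrative placeholder values):
    #   {'<lb-uuid>': {'id': '<lb-uuid>',
    #                  'tenant_id': '<project-uuid>',
    #                  'hostnames': ['192.0.2.10', '192.0.2.11']}}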
@serialized('get_all_deployed_listeners')
@is_operational
def get_all_deployed_listeners(self, expand_subcollections=False):
LOG.debug('getting all deployed listeners on BIG-IPs')
deployed_virtual_dict = {}
for bigip in self.get_all_bigips():
folders = self.system_helper.get_folders(bigip)
for folder in folders:
tenant_id = folder[len(self.service_adapter.prefix):]
if str(folder).startswith(self.service_adapter.prefix):
resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual)
deployed_listeners = resource.get_resources(
bigip, folder, expand_subcollections)
if deployed_listeners:
for virtual in deployed_listeners:
virtual_id = \
virtual.name[len(self.service_adapter.prefix):]
l7_policy = ''
if hasattr(virtual, 'policiesReference') and \
'items' in virtual.policiesReference:
l7_policy = \
virtual.policiesReference['items'][0]
l7_policy = l7_policy['fullPath']
if virtual_id in deployed_virtual_dict:
deployed_virtual_dict[virtual_id][
'hostnames'].append(bigip.hostname)
else:
deployed_virtual_dict[virtual_id] = {
'id': virtual_id,
'tenant_id': tenant_id,
'hostnames': [bigip.hostname],
'l7_policy': l7_policy
}
return deployed_virtual_dict
@serialized('purge_orphaned_nodes')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_nodes(self, tenant_members):
node_helper = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.node)
node_dict = dict()
for bigip in self.get_all_bigips():
for tenant_id, members in tenant_members.iteritems():
partition = self.service_adapter.prefix + tenant_id
nodes = node_helper.get_resources(bigip, partition=partition)
for n in nodes:
node_dict[n.name] = n
for member in members:
rd = self.network_builder.find_subnet_route_domain(
tenant_id, member.get('subnet_id', None))
node_name = "{}%{}".format(member['address'], rd)
node_dict.pop(node_name, None)
for node_name, node in node_dict.iteritems():
try:
node_helper.delete(bigip, name=urllib.quote(node_name),
partition=partition)
except HTTPError as error:
if error.response.status_code == 400:
LOG.error(error.response)
@serialized('get_all_deployed_pools')
@is_operational
def get_all_deployed_pools(self):
LOG.debug('getting all deployed pools on BIG-IPs')
deployed_pool_dict = {}
for bigip in self.get_all_bigips():
folders = self.system_helper.get_folders(bigip)
for folder in folders:
tenant_id = folder[len(self.service_adapter.prefix):]
if str(folder).startswith(self.service_adapter.prefix):
resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool)
deployed_pools = resource.get_resources(bigip, folder)
if deployed_pools:
for pool in deployed_pools:
pool_id = \
pool.name[len(self.service_adapter.prefix):]
monitor_id = ''
if hasattr(pool, 'monitor'):
monitor = pool.monitor.split('/')[2].strip()
monitor_id = \
monitor[len(self.service_adapter.prefix):]
LOG.debug(
'pool {} has monitor {}'.format(
pool.name, monitor))
else:
LOG.debug(
'pool {} has no healthmonitors'.format(
pool.name))
if pool_id in deployed_pool_dict:
deployed_pool_dict[pool_id][
'hostnames'].append(bigip.hostname)
else:
deployed_pool_dict[pool_id] = {
'id': pool_id,
'tenant_id': tenant_id,
'hostnames': [bigip.hostname],
'monitors': monitor_id
}
return deployed_pool_dict
@serialized('purge_orphaned_pool')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_pool(self, tenant_id=None, pool_id=None,
hostnames=list()):
node_helper = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.node)
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
pool_name = self.service_adapter.prefix + pool_id
partition = self.service_adapter.prefix + tenant_id
pool = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool).load(
bigip, pool_name, partition)
members = pool.members_s.get_collection()
pool.delete()
for member in members:
node_name = member.address
try:
node_helper.delete(bigip,
name=urllib.quote(node_name),
partition=partition)
except HTTPError as e:
if e.response.status_code == 404:
pass
if e.response.status_code == 400:
LOG.warn("Failed to delete node -- in use")
else:
LOG.exception("Failed to delete node")
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('pool %s not on BIG-IP %s.'
% (pool_id, bigip.hostname))
except Exception as exc:
LOG.exception('Exception purging pool %s' % str(exc))
@serialized('get_all_deployed_monitors')
@is_operational
def get_all_deployed_health_monitors(self):
"""Retrieve a list of all Health Monitors deployed"""
        LOG.debug('getting all deployed monitors on BIG-IPs')
monitor_types = ['http_monitor', 'https_monitor', 'tcp_monitor',
'ping_monitor']
deployed_monitor_dict = {}
adapter_prefix = self.service_adapter.prefix
for bigip in self.get_all_bigips():
folders = self.system_helper.get_folders(bigip)
for folder in folders:
tenant_id = folder[len(adapter_prefix):]
if str(folder).startswith(adapter_prefix):
resources = map(
lambda x: resource_helper.BigIPResourceHelper(
getattr(resource_helper.ResourceType, x)),
monitor_types)
for resource in resources:
deployed_monitors = resource.get_resources(
bigip, folder)
if deployed_monitors:
for monitor in deployed_monitors:
monitor_id = monitor.name[len(adapter_prefix):]
if monitor_id in deployed_monitor_dict:
deployed_monitor_dict[monitor_id][
'hostnames'].append(bigip.hostname)
else:
deployed_monitor_dict[monitor_id] = {
'id': monitor_id,
'tenant_id': tenant_id,
'hostnames': [bigip.hostname]
}
return deployed_monitor_dict
@serialized('purge_orphaned_health_monitor')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_health_monitor(self, tenant_id=None, monitor_id=None,
hostnames=list()):
"""Purge all monitors that exist on the BIG-IP but not in Neutron"""
resource_types = [
resource_helper.BigIPResourceHelper(x) for x in [
resource_helper.ResourceType.http_monitor,
resource_helper.ResourceType.https_monitor,
resource_helper.ResourceType.ping_monitor,
resource_helper.ResourceType.tcp_monitor]]
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
monitor_name = self.service_adapter.prefix + monitor_id
partition = self.service_adapter.prefix + tenant_id
monitor = None
for monitor_type in resource_types:
try:
monitor = monitor_type.load(bigip, monitor_name,
partition)
break
except HTTPError as err:
if err.response.status_code == 404:
continue
monitor.delete()
except TypeError as err:
                    if 'NoneType' in str(err):
LOG.exception("Could not find monitor {}".format(
monitor_name))
except Exception as exc:
LOG.exception('Exception purging monitor %s' % str(exc))
@serialized('get_all_deployed_l7_policys')
@is_operational
def get_all_deployed_l7_policys(self):
"""Retrieve a dict of all l7policies deployed
The dict returned will have the following format:
{policy_bigip_id_0: {'id': policy_id_0,
'tenant_id': tenant_id,
'hostnames': [hostnames_0]}
...
}
Where hostnames is the list of BIG-IP hostnames impacted, and the
policy_id is the policy_bigip_id without 'wrapper_policy_'
"""
        LOG.debug('getting all deployed l7_policys on BIG-IPs')
deployed_l7_policys_dict = {}
for bigip in self.get_all_bigips():
folders = self.system_helper.get_folders(bigip)
for folder in folders:
tenant_id = folder[len(self.service_adapter.prefix):]
if str(folder).startswith(self.service_adapter.prefix):
resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.l7policy)
deployed_l7_policys = resource.get_resources(
bigip, folder)
if deployed_l7_policys:
for l7_policy in deployed_l7_policys:
l7_policy_id = l7_policy.name
if l7_policy_id in deployed_l7_policys_dict:
my_dict = \
deployed_l7_policys_dict[l7_policy_id]
my_dict['hostnames'].append(bigip.hostname)
else:
po_id = l7_policy_id.replace(
'wrapper_policy_', '')
deployed_l7_policys_dict[l7_policy_id] = {
'id': po_id,
'tenant_id': tenant_id,
'hostnames': [bigip.hostname]
}
return deployed_l7_policys_dict
@serialized('purge_orphaned_l7_policy')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_l7_policy(self, tenant_id=None, l7_policy_id=None,
hostnames=list(), listener_id=None):
"""Purge all l7_policys that exist on the BIG-IP but not in Neutron"""
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
error = None
try:
l7_policy_name = l7_policy_id
partition = self.service_adapter.prefix + tenant_id
if listener_id and partition:
if self.service_adapter.prefix not in listener_id:
listener_id = \
self.service_adapter.prefix + listener_id
li_resource = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual).load(
bigip, listener_id, partition)
li_resource.update(policies=[])
l7_policy = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.l7policy).load(
bigip, l7_policy_name, partition)
l7_policy.delete()
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('l7_policy %s not on BIG-IP %s.'
% (l7_policy_id, bigip.hostname))
else:
error = err
except Exception as exc:
                    error = exc
if error:
kwargs = dict(
tenant_id=tenant_id, l7_policy_id=l7_policy_id,
hostname=bigip.hostname, listener_id=listener_id)
LOG.exception('Exception: purge_orphaned_l7_policy({}) '
                                  '"{}"'.format(kwargs, error))
@serialized('purge_orphaned_loadbalancer')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_loadbalancer(self, tenant_id=None,
loadbalancer_id=None, hostnames=list()):
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
va_name = self.service_adapter.prefix + loadbalancer_id
partition = self.service_adapter.prefix + tenant_id
va = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual_address).load(
bigip, va_name, partition)
# get virtual services (listeners)
# referencing this virtual address
vses = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual).get_resources(
bigip, partition)
vs_dest_compare = '/' + partition + '/' + va.name
for vs in vses:
if str(vs.destination).startswith(vs_dest_compare):
if hasattr(vs, 'pool'):
pool = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.pool).load(
bigip, os.path.basename(vs.pool),
partition)
vs.delete()
pool.delete()
else:
vs.delete()
resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual_address).delete(
bigip, va_name, partition)
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('loadbalancer %s not on BIG-IP %s.'
% (loadbalancer_id, bigip.hostname))
except Exception as exc:
LOG.exception('Exception purging loadbalancer %s'
% str(exc))
@serialized('purge_orphaned_listener')
@is_operational
@log_helpers.log_method_call
def purge_orphaned_listener(
self, tenant_id=None, listener_id=None, hostnames=[]):
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
listener_name = self.service_adapter.prefix + listener_id
partition = self.service_adapter.prefix + tenant_id
listener = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual).load(
bigip, listener_name, partition)
listener.delete()
except HTTPError as err:
if err.response.status_code == 404:
LOG.debug('listener %s not on BIG-IP %s.'
% (listener_id, bigip.hostname))
except Exception as exc:
LOG.exception('Exception purging listener %s' % str(exc))
@serialized('create_loadbalancer')
@is_operational
def create_loadbalancer(self, loadbalancer, service):
"""Create virtual server."""
self._common_service_handler(service)
return self._update_target(service)
@serialized('update_loadbalancer')
@is_operational
def update_loadbalancer(self, old_loadbalancer, loadbalancer, service):
"""Update virtual server."""
# anti-pattern three args unused.
self._common_service_handler(service)
return self._update_target(service)
@serialized('delete_loadbalancer')
@is_operational
def delete_loadbalancer(self, loadbalancer, service):
"""Delete loadbalancer."""
LOG.debug("Deleting loadbalancer")
self._common_service_handler(
service,
delete_partition=True,
delete_event=True)
return self._update_target(service)
@serialized('create_listener')
@is_operational
@log_helpers.log_method_call
def create_listener(self, listener, service):
"""Create virtual server."""
LOG.debug("Creating listener")
self._common_service_handler(service)
return self._update_target(service,
self._update_listener_status,
service)
@serialized('update_listener')
@is_operational
def update_listener(self, old_listener, listener, service):
"""Update virtual server."""
LOG.debug("Updating listener")
self._common_service_handler(service)
return self._update_target(service,
self._update_listener_status,
service)
@serialized('delete_listener')
@is_operational
def delete_listener(self, listener, service):
"""Delete virtual server."""
LOG.debug("Deleting listener")
self._common_service_handler(service)
return self._update_target(service,
self._update_listener_status,
service)
@serialized('create_pool')
@is_operational
def create_pool(self, pool, service):
"""Create lb pool."""
LOG.debug("Creating pool")
# pzhang(NOTE): pool may not bound with a listener
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_pool_status,
service["pools"])
@serialized('update_pool')
@is_operational
def update_pool(self, old_pool, pool, service):
"""Update lb pool."""
LOG.debug("Updating pool")
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_pool_status,
service["pools"])
@serialized('delete_pool')
@is_operational
def delete_pool(self, pool, service):
"""Delete lb pool."""
LOG.debug("Deleting pool")
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_pool_status,
service["pools"])
@serialized('create_l7policy')
@is_operational
def create_l7policy(self, l7policy, service):
"""Create lb l7policy."""
LOG.debug("Creating l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7policy_status,
service["l7policies"])
@serialized('update_l7policy')
@is_operational
def update_l7policy(self, old_l7policy, l7policy, service):
"""Update lb l7policy."""
LOG.debug("Updating l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7policy_status,
service["l7policies"])
@serialized('delete_l7policy')
@is_operational
def delete_l7policy(self, l7policy, service):
"""Delete lb l7policy."""
LOG.debug("Deleting l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7policy_status,
service["l7policies"])
# TODO(pzhang): test this
@serialized('create_l7rule')
@is_operational
def create_l7rule(self, l7rule, service):
"""Create lb l7rule."""
LOG.debug("Creating l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7rule_status,
service['l7policy_rules'])
# TODO(pzhang): test this
@serialized('update_l7rule')
@is_operational
def update_l7rule(self, old_l7rule, l7rule, service):
"""Update lb l7rule."""
LOG.debug("Updating l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7rule_status,
service['l7policy_rules'])
# TODO(pzhang): test this
@serialized('delete_l7rule')
@is_operational
def delete_l7rule(self, l7rule, service):
"""Delete lb l7rule."""
LOG.debug("Deleting l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_l7rule_status,
service['l7policy_rules'])
@serialized('create_member')
@is_operational
def create_member(self, member, service):
"""Create pool member."""
LOG.debug("Creating member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_member_status,
service["members"])
@serialized('update_member')
@is_operational
def update_member(self, old_member, member, service):
"""Update pool member."""
LOG.debug("Updating member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_member_status,
service["members"])
@serialized('delete_member')
@is_operational
def delete_member(self, member, service):
"""Delete pool member."""
LOG.debug("Deleting member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_member_status,
service["members"])
@serialized('create_health_monitor')
@is_operational
def create_health_monitor(self, health_monitor, service):
"""Create pool health monitor."""
LOG.debug("Creating health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_health_monitor_status,
service["healthmonitors"])
@serialized('update_health_monitor')
@is_operational
def update_health_monitor(self, old_health_monitor,
health_monitor, service):
"""Update pool health monitor."""
LOG.debug("Updating health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_health_monitor_status,
service["healthmonitors"])
@serialized('delete_health_monitor')
@is_operational
def delete_health_monitor(self, health_monitor, service):
"""Delete pool health monitor."""
LOG.debug("Deleting health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
self._update_health_monitor_status,
service["healthmonitors"])
def _update_target(self, service,
update_method=None, target=None):
if self.do_service_update:
if target is not None and update_method is not None:
update_method(target)
self._update_loadbalancer_status(service, timed_out=False)
loadbalancer = service.get('loadbalancer', {})
lb_provisioning_status = loadbalancer.get("provisioning_status",
f5const.F5_ERROR)
lb_pending = \
(lb_provisioning_status == f5const.F5_PENDING_CREATE or
lb_provisioning_status == f5const.F5_PENDING_UPDATE)
return lb_pending
@is_operational
def get_stats(self, service):
lb_stats = {}
stats = ['clientside.bitsIn',
'clientside.bitsOut',
'clientside.curConns',
'clientside.totConns']
loadbalancer = service['loadbalancer']
try:
# sum virtual server stats for all BIG-IPs
vs_stats = self.lbaas_builder.get_listener_stats(service, stats)
# convert to bytes
lb_stats[f5const.F5_STATS_IN_BYTES] = \
vs_stats['clientside.bitsIn']/8
lb_stats[f5const.F5_STATS_OUT_BYTES] = \
vs_stats['clientside.bitsOut']/8
lb_stats[f5const.F5_STATS_ACTIVE_CONNECTIONS] = \
vs_stats['clientside.curConns']
lb_stats[f5const.F5_STATS_TOTAL_CONNECTIONS] = \
vs_stats['clientside.totConns']
# update Neutron
self.plugin_rpc.update_loadbalancer_stats(
loadbalancer['id'], lb_stats)
except Exception as e:
LOG.error("Error getting loadbalancer stats: %s", e.message)
finally:
return lb_stats
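    # Shape of the stats dict returned above (illustrative values only); the
    # clientside bit counters are converted to bytes before being reported:
    #   {f5const.F5_STATS_IN_BYTES: 1024,
    #    f5const.F5_STATS_OUT_BYTES: 2048,
    #    f5const.F5_STATS_ACTIVE_CONNECTIONS: 3,
    #    f5const.F5_STATS_TOTAL_CONNECTIONS: 42}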
def fdb_add(self, fdb):
# Add (L2toL3) forwarding database entries
for bigip in self.get_all_bigips():
self.network_builder.add_bigip_fdb(bigip, fdb)
def fdb_remove(self, fdb):
# Remove (L2toL3) forwarding database entries
for bigip in self.get_all_bigips():
self.network_builder.remove_bigip_fdb(bigip, fdb)
def fdb_update(self, fdb):
# Update (L2toL3) forwarding database entries
for bigip in self.get_all_bigips():
self.network_builder.update_bigip_fdb(bigip, fdb)
def tunnel_update(self, **kwargs):
# Tunnel Update from Neutron Core RPC
pass
def tunnel_sync(self):
# Only sync when supported types are present
if not [i for i in self.agent_configurations['tunnel_types']
if i in ['gre', 'vxlan']]:
return False
tunnel_ips = []
for bigip in self.get_all_bigips():
if bigip.local_ip:
tunnel_ips.append(bigip.local_ip)
self.network_builder.tunnel_sync(tunnel_ips)
# Tunnel sync sent.
return False
@serialized('sync')
@is_operational
def sync(self, service):
"""Sync service defintion to device."""
# loadbalancer and plugin_rpc may not be set
lb_id = service.get('loadbalancer', dict()).get('id', '')
if hasattr(self, 'plugin_rpc') and self.plugin_rpc and lb_id:
# Get the latest service. It may have changed.
service = self.plugin_rpc.get_service_by_loadbalancer_id(lb_id)
if service.get('loadbalancer', None):
self.lbaas_builder.to_sync = True
self._common_service_handler(service)
self.lbaas_builder.to_sync = False
                # pzhang(NOTE): move update of the neutron db out here for the lb tree
if self.do_service_update:
self.update_service_status(service)
loadbalancer = service.get('loadbalancer', {})
lb_provisioning_status = loadbalancer.get("provisioning_status",
f5const.F5_ERROR)
lb_pending = \
(lb_provisioning_status == f5const.F5_PENDING_CREATE or
lb_provisioning_status == f5const.F5_PENDING_UPDATE)
return lb_pending
else:
LOG.debug("Attempted sync of deleted pool")
@serialized('backup_configuration')
@is_operational
def backup_configuration(self):
# Save Configuration on Devices
for bigip in self.get_all_bigips():
LOG.debug('_backup_configuration: saving device %s.'
% bigip.hostname)
self.cluster_manager.save_config(bigip)
def _get_monitor_endpoint(self, bigip, service):
monitor_type = self.service_adapter.get_monitor_type(service)
if not monitor_type:
monitor_type = ""
if monitor_type == "HTTPS":
hm = bigip.tm.ltm.monitor.https_s.https
elif monitor_type == "TCP":
hm = bigip.tm.ltm.monitor.tcps.tcp
elif monitor_type == "PING":
hm = bigip.tm.ltm.monitor.gateway_icmps.gateway_icmp
else:
hm = bigip.tm.ltm.monitor.https.http
return hm
def service_rename_required(self, service):
rename_required = False
        # Returns whether the virtual address on the bigip needs renaming
if not service['loadbalancer']:
return False
bigips = self.get_config_bigips()
loadbalancer = service['loadbalancer']
# Does the correctly named virtual address exist?
for bigip in bigips:
virtual_address = VirtualAddress(self.service_adapter,
loadbalancer)
if not virtual_address.exists(bigip):
rename_required = True
break
return rename_required
def service_object_teardown(self, service):
        # Tear down the virtuals, pools and monitors created for this service
if not service['loadbalancer']:
return False
bigips = self.get_config_bigips()
loadbalancer = service['loadbalancer']
folder_name = self.service_adapter.get_folder_name(
loadbalancer['tenant_id']
)
# Change to bigips
for bigip in bigips:
# Delete all virtuals
v = bigip.tm.ltm.virtuals.virtual
for listener in service['listeners']:
l_name = listener.get("name", "")
if not l_name:
svc = {"loadbalancer": loadbalancer,
"listener": listener}
vip = self.service_adapter.get_virtual(svc)
l_name = vip['name']
if v.exists(name=l_name, partition=folder_name):
# Found a virtual that is named by the OS object,
# delete it.
l_obj = v.load(name=l_name, partition=folder_name)
LOG.warn("Deleting listener: /%s/%s" %
(folder_name, l_name))
l_obj.delete(name=l_name, partition=folder_name)
# Delete all pools
p = bigip.tm.ltm.pools.pool
for os_pool in service['pools']:
p_name = os_pool.get('name', "")
if not p_name:
svc = {"loadbalancer": loadbalancer,
"pool": os_pool}
pool = self.service_adapter.get_pool(svc)
p_name = pool['name']
if p.exists(name=p_name, partition=folder_name):
p_obj = p.load(name=p_name, partition=folder_name)
LOG.warn("Deleting pool: /%s/%s" % (folder_name, p_name))
p_obj.delete(name=p_name, partition=folder_name)
# Delete all healthmonitors
for healthmonitor in service['healthmonitors']:
svc = {'loadbalancer': loadbalancer,
'healthmonitor': healthmonitor}
monitor_ep = self._get_monitor_endpoint(bigip, svc)
m_name = healthmonitor.get('name', "")
if not m_name:
hm = self.service_adapter.get_healthmonitor(svc)
m_name = hm['name']
if monitor_ep.exists(name=m_name, partition=folder_name):
m_obj = monitor_ep.load(name=m_name, partition=folder_name)
LOG.warn("Deleting monitor: /%s/%s" % (
folder_name, m_name))
m_obj.delete()
def _service_exists(self, service):
# Returns whether the bigip has the service defined
if not service['loadbalancer']:
return False
loadbalancer = service['loadbalancer']
folder_name = self.service_adapter.get_folder_name(
loadbalancer['tenant_id']
)
if self.network_builder:
# append route domain to member address
self.network_builder._annotate_service_route_domains(service)
        # For each bigip in the cluster:
for bigip in self.get_config_bigips():
# Does the tenant folder exist?
if not self.system_helper.folder_exists(bigip, folder_name):
LOG.error("Folder %s does not exists on bigip: %s" %
(folder_name, bigip.hostname))
return False
# Get the virtual address
virtual_address = VirtualAddress(self.service_adapter,
loadbalancer)
if not virtual_address.exists(bigip):
LOG.error("Virtual address %s(%s) does not "
"exists on bigip: %s" % (virtual_address.name,
virtual_address.address,
bigip.hostname))
return False
# Ensure that each virtual service exists.
for listener in service['listeners']:
svc = {"loadbalancer": loadbalancer,
"listener": listener}
virtual_server = self.service_adapter.get_virtual_name(svc)
if not self.vs_manager.exists(bigip,
name=virtual_server['name'],
partition=folder_name):
LOG.error("Virtual /%s/%s not found on bigip: %s" %
(virtual_server['name'], folder_name,
bigip.hostname))
return False
# Ensure that each pool exists.
for pool in service['pools']:
svc = {"loadbalancer": loadbalancer,
"pool": pool}
bigip_pool = self.service_adapter.get_pool(svc)
if not self.pool_manager.exists(
bigip,
name=bigip_pool['name'],
partition=folder_name):
LOG.error("Pool /%s/%s not found on bigip: %s" %
(folder_name, bigip_pool['name'],
bigip.hostname))
return False
else:
deployed_pool = self.pool_manager.load(
bigip,
name=bigip_pool['name'],
partition=folder_name)
deployed_members = \
deployed_pool.members_s.get_collection()
# First check that number of members deployed
# is equal to the number in the service.
if len(deployed_members) != len(pool['members']):
LOG.error("Pool %s members member count mismatch "
"match: deployed %d != service %d" %
(bigip_pool['name'], len(deployed_members),
len(pool['members'])))
return False
# Ensure each pool member exists
for member in service['members']:
if member['pool_id'] == pool['id']:
lb = self.lbaas_builder
pool = lb.get_pool_by_id(
service, member["pool_id"])
svc = {"loadbalancer": loadbalancer,
"member": member,
"pool": pool}
if not lb.pool_builder.member_exists(svc, bigip):
LOG.error("Pool member not found: %s" %
svc['member'])
return False
# Ensure that each health monitor exists.
for healthmonitor in service['healthmonitors']:
svc = {"loadbalancer": loadbalancer,
"healthmonitor": healthmonitor}
monitor = self.service_adapter.get_healthmonitor(svc)
monitor_ep = self._get_monitor_endpoint(bigip, svc)
if not monitor_ep.exists(name=monitor['name'],
partition=folder_name):
LOG.error("Monitor /%s/%s not found on bigip: %s" %
(monitor['name'], folder_name, bigip.hostname))
return False
return True
def get_loadbalancers_in_tenant(self, tenant_id):
loadbalancers = self.plugin_rpc.get_all_loadbalancers()
return [lb['lb_id'] for lb in loadbalancers
if lb['tenant_id'] == tenant_id]
def _common_service_handler(self, service,
delete_partition=False,
delete_event=False):
# Assure that the service is configured on bigip(s)
start_time = time()
lb_pending = True
self.do_service_update = True
if self.conf.trace_service_requests:
self.trace_service_requests(service)
loadbalancer = service.get("loadbalancer", None)
if not loadbalancer:
LOG.error("_common_service_handler: Service loadbalancer is None")
return lb_pending
lb_provisioning_status = loadbalancer.get("provisioning_status",
f5const.F5_ERROR)
try:
try:
self.tenant_manager.assure_tenant_created(service)
except Exception as e:
LOG.error("Tenant folder creation exception: %s",
e.message)
if lb_provisioning_status != f5const.F5_PENDING_DELETE:
loadbalancer['provisioning_status'] = \
f5const.F5_ERROR
raise e
LOG.debug(" _assure_tenant_created took %.5f secs" %
(time() - start_time))
traffic_group = self.service_to_traffic_group(service)
loadbalancer['traffic_group'] = traffic_group
if self.network_builder:
start_time = time()
try:
self.network_builder.prep_service_networking(
service, traffic_group)
except f5ex.NetworkNotReady as error:
LOG.debug("Network creation deferred until network "
"definition is completed: %s",
error.message)
if not delete_event:
self.do_service_update = False
raise error
except Exception as error:
LOG.error("Prep-network exception: icontrol_driver: %s",
error.message)
if lb_provisioning_status != f5const.F5_PENDING_DELETE:
loadbalancer['provisioning_status'] = \
f5const.F5_ERROR
if not delete_event:
raise error
finally:
if time() - start_time > .001:
LOG.debug(" _prep_service_networking "
"took %.5f secs" % (time() - start_time))
all_subnet_hints = {}
for bigip in self.get_config_bigips():
# check_for_delete_subnets:
# keep track of which subnets we should check to delete
# for a deleted vip or member
# do_not_delete_subnets:
# If we add an IP to a subnet we must not delete the subnet
all_subnet_hints[bigip.device_name] = \
{'check_for_delete_subnets': {},
'do_not_delete_subnets': []}
LOG.debug("XXXXXXXXX: Pre assure service")
self.lbaas_builder.assure_service(service,
traffic_group,
all_subnet_hints)
LOG.debug("XXXXXXXXX: Post assure service")
if self.network_builder:
start_time = time()
try:
self.network_builder.post_service_networking(
service, all_subnet_hints)
except Exception as error:
LOG.error("Post-network exception: icontrol_driver: %s",
error.message)
if lb_provisioning_status != f5const.F5_PENDING_DELETE:
loadbalancer['provisioning_status'] = \
f5const.F5_ERROR
raise error
if time() - start_time > .001:
LOG.debug(" _post_service_networking "
"took %.5f secs" % (time() - start_time))
except f5ex.NetworkNotReady as error:
pass
except Exception as err:
LOG.exception(err)
finally:
# only delete partition if loadbalancer is being deleted
if lb_provisioning_status == f5const.F5_PENDING_DELETE:
self.tenant_manager.assure_tenant_cleanup(service,
all_subnet_hints)
def update_service_status(self, service, timed_out=False):
"""Update status of objects in controller."""
LOG.debug("_update_service_status")
if not self.plugin_rpc:
LOG.error("Cannot update status in Neutron without "
"RPC handler.")
return
if 'members' in service:
# Call update_members_status
self._update_member_status(service['members'], timed_out)
if 'healthmonitors' in service:
# Call update_monitor_status
self._update_health_monitor_status(
service['healthmonitors']
)
if 'pools' in service:
# Call update_pool_status
self._update_pool_status(
service['pools']
)
if 'listeners' in service:
# Call update_listener_status
self._update_listener_status(service)
if 'l7policy_rules' in service:
self._update_l7rule_status(service['l7policy_rules'])
if 'l7policies' in service:
self._update_l7policy_status(service['l7policies'])
self._update_loadbalancer_status(service, timed_out)
def _update_member_status(self, members, timed_out=False):
"""Update member status in OpenStack."""
for member in members:
if 'provisioning_status' in member:
provisioning_status = member['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
if timed_out and \
provisioning_status != f5const.F5_ACTIVE:
member['provisioning_status'] = f5const.F5_ERROR
operating_status = f5const.F5_OFFLINE
else:
member['provisioning_status'] = f5const.F5_ACTIVE
operating_status = f5const.F5_ONLINE
self.plugin_rpc.update_member_status(
member['id'],
member['provisioning_status'],
operating_status
)
elif provisioning_status == f5const.F5_PENDING_DELETE:
if not member.get('parent_pool_deleted', False):
self.plugin_rpc.member_destroyed(
member['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_member_status(
member['id'],
f5const.F5_ERROR,
f5const.F5_OFFLINE)
def _update_health_monitor_status(self, health_monitors):
"""Update pool monitor status in OpenStack."""
for health_monitor in health_monitors:
if 'provisioning_status' in health_monitor:
provisioning_status = health_monitor['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
self.plugin_rpc.update_health_monitor_status(
health_monitor['id'],
f5const.F5_ACTIVE,
f5const.F5_ONLINE
)
health_monitor['provisioning_status'] = \
f5const.F5_ACTIVE
elif provisioning_status == f5const.F5_PENDING_DELETE:
self.plugin_rpc.health_monitor_destroyed(
health_monitor['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_health_monitor_status(
health_monitor['id'])
@log_helpers.log_method_call
def _update_pool_status(self, pools):
"""Update pool status in OpenStack."""
for pool in pools:
if 'provisioning_status' in pool:
provisioning_status = pool['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
self.plugin_rpc.update_pool_status(
pool['id'],
f5const.F5_ACTIVE,
f5const.F5_ONLINE
)
pool['provisioning_status'] = f5const.F5_ACTIVE
elif provisioning_status == f5const.F5_PENDING_DELETE:
self.plugin_rpc.pool_destroyed(
pool['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_pool_status(pool['id'])
@log_helpers.log_method_call
def _update_listener_status(self, service):
"""Update listener status in OpenStack."""
listeners = service['listeners']
for listener in listeners:
if 'provisioning_status' in listener:
provisioning_status = listener['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
self.plugin_rpc.update_listener_status(
listener['id'],
f5const.F5_ACTIVE,
listener['operating_status']
)
listener['provisioning_status'] = \
f5const.F5_ACTIVE
elif provisioning_status == f5const.F5_PENDING_DELETE:
self.plugin_rpc.listener_destroyed(
listener['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_listener_status(
listener['id'],
provisioning_status,
f5const.F5_OFFLINE)
@log_helpers.log_method_call
def _update_l7rule_status(self, l7rules):
"""Update l7rule status in OpenStack."""
for l7rule in l7rules:
if 'provisioning_status' in l7rule:
provisioning_status = l7rule['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
self.plugin_rpc.update_l7rule_status(
l7rule['id'],
l7rule['policy_id'],
f5const.F5_ACTIVE,
f5const.F5_ONLINE
)
elif provisioning_status == f5const.F5_PENDING_DELETE:
self.plugin_rpc.l7rule_destroyed(
l7rule['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_l7rule_status(
l7rule['id'], l7rule['policy_id'])
@log_helpers.log_method_call
def _update_l7policy_status(self, l7policies):
LOG.debug("_update_l7policy_status")
"""Update l7policy status in OpenStack."""
for l7policy in l7policies:
if 'provisioning_status' in l7policy:
provisioning_status = l7policy['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
self.plugin_rpc.update_l7policy_status(
l7policy['id'],
f5const.F5_ACTIVE,
f5const.F5_ONLINE
)
elif provisioning_status == f5const.F5_PENDING_DELETE:
LOG.debug("calling l7policy_destroyed")
self.plugin_rpc.l7policy_destroyed(
l7policy['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_l7policy_status(l7policy['id'])
@log_helpers.log_method_call
def _update_loadbalancer_status(self, service, timed_out=False):
"""Update loadbalancer status in OpenStack."""
loadbalancer = service.get('loadbalancer', {})
provisioning_status = loadbalancer.get('provisioning_status',
f5const.F5_ERROR)
# if provisioning_status in self.positive_plugin_const_state:
if provisioning_status in self.positive_plugin_const_state:
if timed_out:
operating_status = (f5const.F5_OFFLINE)
if provisioning_status == f5const.F5_PENDING_CREATE:
loadbalancer['provisioning_status'] = \
f5const.F5_ERROR
else:
loadbalancer['provisioning_status'] = \
f5const.F5_ACTIVE
else:
operating_status = (f5const.F5_ONLINE)
loadbalancer['provisioning_status'] = \
f5const.F5_ACTIVE
self.plugin_rpc.update_loadbalancer_status(
loadbalancer['id'],
loadbalancer['provisioning_status'],
operating_status)
elif provisioning_status == f5const.F5_PENDING_DELETE:
self.plugin_rpc.loadbalancer_destroyed(
loadbalancer['id'])
elif provisioning_status == f5const.F5_ERROR:
self.plugin_rpc.update_loadbalancer_status(
loadbalancer['id'],
provisioning_status,
f5const.F5_OFFLINE)
elif provisioning_status == f5const.F5_ACTIVE:
LOG.debug('Loadbalancer provisioning status is active')
else:
LOG.error('Loadbalancer provisioning status is invalid')
@is_operational
def update_operating_status(self, service):
if 'members' in service:
if self.network_builder:
# append route domain to member address
try:
self.network_builder._annotate_service_route_domains(
service)
except f5ex.InvalidNetworkType as exc:
LOG.warning(exc.msg)
return
            # get current member status
self.lbaas_builder.update_operating_status(service)
            # update Neutron
for member in service['members']:
if member['provisioning_status'] == f5const.F5_ACTIVE:
operating_status = member.get('operating_status', None)
self.plugin_rpc.update_member_status(
member['id'],
provisioning_status=None,
operating_status=operating_status)
def get_active_bigip(self):
bigips = self.get_all_bigips()
if len(bigips) == 1:
return bigips[0]
for bigip in bigips:
if hasattr(bigip, 'failover_state'):
if bigip.failover_state == 'active':
return bigip
# if can't determine active, default to first one
return bigips[0]
def service_to_traffic_group(self, service):
# Hash service tenant id to index of traffic group
        # and return which iControlDriver.__traffic_group that tenant is "in"
return self.tenant_to_traffic_group(
service['loadbalancer']['tenant_id'])
def tenant_to_traffic_group(self, tenant_id):
# Hash tenant id to index of traffic group
hexhash = hashlib.md5(tenant_id).hexdigest()
tg_index = int(hexhash, 16) % len(self.__traffic_groups)
return self.__traffic_groups[tg_index]
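    # Illustrative sketch (not part of the original driver): how the MD5-based
    # mapping above pins a tenant to a traffic group. The group names and
    # tenant id are assumptions for the example only.
    #
    #   import hashlib
    #   traffic_groups = ['/Common/traffic-group-1', '/Common/traffic-group-2']
    #   tenant_id = '0123456789abcdef0123456789abcdef'
    #   index = int(hashlib.md5(tenant_id).hexdigest(), 16) % len(traffic_groups)
    #   # the same tenant_id always hashes to the same index, so all of a
    #   # tenant's objects land on the same traffic group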
# these functions should return only active BIG-IP
# not errored BIG-IPs.
def get_bigip(self):
hostnames = sorted(list(self.__bigips))
for host in hostnames:
if hasattr(self.__bigips[host], 'status') and \
self.__bigips[host].status == 'active':
return self.__bigips[host]
def get_bigip_hosts(self):
return_hosts = []
for host in list(self.__bigips):
if hasattr(self.__bigips[host], 'status') and \
self.__bigips[host].status == 'active':
return_hosts.append(host)
return sorted(return_hosts)
def get_all_bigips(self):
return_bigips = []
for host in list(self.__bigips):
if hasattr(self.__bigips[host], 'status') and \
self.__bigips[host].status == 'active':
return_bigips.append(self.__bigips[host])
return return_bigips
def get_config_bigips(self):
return self.get_all_bigips()
# these are the refactored methods
def get_active_bigips(self):
return self.get_all_bigips()
def get_errored_bigips_hostnames(self):
return_hostnames = []
for host in list(self.__bigips):
bigip = self.__bigips[host]
if hasattr(bigip, 'status') and bigip.status == 'error':
return_hostnames.append(host)
return return_hostnames
def get_inbound_throughput(self, bigip, global_statistics=None):
return self.stat_helper.get_inbound_throughput(
bigip, global_stats=global_statistics)
def get_outbound_throughput(self, bigip, global_statistics=None):
return self.stat_helper.get_outbound_throughput(
bigip, global_stats=global_statistics)
def get_throughput(self, bigip=None, global_statistics=None):
return self.stat_helper.get_throughput(
bigip, global_stats=global_statistics)
def get_active_connections(self, bigip=None, global_statistics=None):
return self.stat_helper.get_active_connection_count(
bigip, global_stats=global_statistics)
def get_ssltps(self, bigip=None, global_statistics=None):
return self.stat_helper.get_active_SSL_TPS(
bigip, global_stats=global_statistics)
def get_node_count(self, bigip=None, global_statistics=None):
return len(bigip.tm.ltm.nodes.get_collection())
def get_clientssl_profile_count(self, bigip=None, global_statistics=None):
return ssl_profile.SSLProfileHelper.get_client_ssl_profile_count(bigip)
def get_tenant_count(self, bigip=None, global_statistics=None):
return self.system_helper.get_tenant_folder_count(bigip)
def get_tunnel_count(self, bigip=None, global_statistics=None):
return self.network_helper.get_tunnel_count(bigip)
def get_vlan_count(self, bigip=None, global_statistics=None):
return self.network_helper.get_vlan_count(bigip)
def get_route_domain_count(self, bigip=None, global_statistics=None):
return self.network_helper.get_route_domain_count(bigip)
def _init_traffic_groups(self, bigip):
try:
LOG.debug('retrieving traffic groups from %s' % bigip.hostname)
self.__traffic_groups = \
self.cluster_manager.get_traffic_groups(bigip)
if 'traffic-group-local-only' in self.__traffic_groups:
LOG.debug('removing reference to non-floating traffic group')
self.__traffic_groups.remove('traffic-group-local-only')
self.__traffic_groups.sort()
            LOG.debug('service placement will be done on traffic group(s): %s'
% self.__traffic_groups)
except Exception:
bigip.status = 'error'
bigip.status_message = \
'could not determine traffic groups for service placement'
raise
def _validate_bigip_version(self, bigip, hostname):
# Ensure the BIG-IP has sufficient version
major_version = self.system_helper.get_major_version(bigip)
if major_version < f5const.MIN_TMOS_MAJOR_VERSION:
raise f5ex.MajorVersionValidateFailed(
'Device %s must be at least TMOS %s.%s'
% (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
f5const.MIN_TMOS_MINOR_VERSION))
minor_version = self.system_helper.get_minor_version(bigip)
if minor_version < f5const.MIN_TMOS_MINOR_VERSION:
raise f5ex.MinorVersionValidateFailed(
'Device %s must be at least TMOS %s.%s'
% (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
f5const.MIN_TMOS_MINOR_VERSION))
return major_version, minor_version
def trace_service_requests(self, service):
"""Dump services to a file for debugging."""
with open(self.file_name, 'r+') as fp:
fp.seek(-1, 2)
fp.write(',')
json.dump(service, fp, sort_keys=True, indent=2)
fp.write(']')
def get_config_dir(self):
"""Determine F5 agent configuration directory.
        Oslo cfg has a config_dir option, but the F5 agent is not currently
        started with this option. To be complete, the code checks whether
        config_dir is defined and uses that value as long as it is a single
        string (other types are ignored). If it is not defined, the directory
        of the agent INI file, which is currently used when starting the F5
        agent, is returned. If neither option is available,
        use /etc/neutron/services/f5.
:return: str defining configuration directory.
"""
if self.conf.config_dir and isinstance(self.conf.config_dir, str):
# use config_dir parameter if defined, and is a string
return self.conf.config_dir
elif self.conf.config_file:
# multiple config files (neutron and agent) are usually defined
if isinstance(self.conf.config_file, list):
# find agent config (f5-openstack-agent.ini)
config_files = self.conf.config_file
for file_name in config_files:
if 'f5-openstack-agent.ini' in file_name:
return os.path.dirname(file_name)
elif isinstance(self.conf.config_file, str):
# not a list, just a single string
return os.path.dirname(self.conf.config_file)
# if all else fails
return '/etc/neutron/services/f5'
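    # Illustrative sketch (not part of the original driver): the fallback order
    # implemented above, with assumed oslo.config values.
    #
    #   conf.config_dir  = None
    #   conf.config_file = ['/etc/neutron/neutron.conf',
    #                       '/etc/f5/f5-openstack-agent.ini']
    #   get_config_dir() -> '/etc/f5'             # dirname of the agent INI
    #
    #   # with neither option set, the hard-coded fallback is returned:
    #   get_config_dir() -> '/etc/neutron/services/f5'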
| F5Networks/f5-openstack-agent | f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py | Python | apache-2.0 | 112,266 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012, The Honeynet Project. All rights reserved.
# Author: Kun Yang <[email protected]>
#
# APKIL is free software: you can redistribute it and/or modify it under
# the terms of version 3 of the GNU Lesser General Public License as
# published by the Free Software Foundation.
#
# APKIL is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with APKIL. If not, see <http://www.gnu.org/licenses/>.
| JulianSchuette/android-instrumentation | injector/injector/apk.py | Python | apache-2.0 | 726 |
"""
Predict labels using trained ML models. Use average probability ensemble.
"""
__author__ = 'bshang'
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.externals import joblib
def convert_label_to_array(str_label):
str_label = str_label.split(' ')
return [int(x) for x in str_label if len(x) > 0]
MODEL = 'inception-v3'
LAYER = 'global_pool_output'
NUM_EPOCH = 30
BIZ_FEATURES_PATH = '/data/test_biz_features_{0}_{1}_{2}.h5'.format(MODEL, LAYER, NUM_EPOCH)
df = pd.read_csv(BIZ_FEATURES_PATH, header=0)
cols = ["F" + str(i+1) for i in range(0, 2048)]
X = df[cols].values
model_svc = joblib.load('/data/skmodels/svc_inception-v3.pkl')
model_lrc = joblib.load('/data/skmodels/lrc_inception-v3.pkl')
model_rfc = joblib.load('/data/skmodels/rfc_inception-v3.pkl')
print('predict svc')
y_predict_proba_svc = model_svc.predict_proba(X)
print('predict lrc')
y_predict_proba_lrc = model_lrc.predict_proba(X)
print('predict rfc')
y_predict_proba_rfc = model_rfc.predict_proba(X)
y_predict_proba = np.mean(
np.array([y_predict_proba_svc, y_predict_proba_lrc, y_predict_proba_rfc]), axis=0)
THRESHOLD = 0.46 # estimated from cross-validation
y_predict = preprocessing.binarize(y_predict_proba, threshold=THRESHOLD)
# convert the binary labels back to numbered labels
df_biz2lab = pd.read_csv('/data/train.csv').dropna()
y = np.array([convert_label_to_array(y) for y in df_biz2lab['labels']])
mlb = preprocessing.MultiLabelBinarizer()
mlb.fit_transform(y)
y_ = mlb.inverse_transform(y_predict) # y_ contain the numbered labels
y_ = [' '.join(str(x) for x in ls) for ls in y_]
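# Minimal sketch (not part of the original pipeline): the binarize /
# inverse_transform round trip used above, with made-up probabilities.
#
#   mlb = preprocessing.MultiLabelBinarizer()
#   mlb.fit_transform([(0, 2), (1,), (0, 1, 2)])            # classes 0, 1, 2
#   proba = np.array([[0.9, 0.1, 0.7]])
#   binary = preprocessing.binarize(proba, threshold=0.46)  # [[1., 0., 1.]]
#   mlb.inverse_transform(binary)                           # [(0, 2)]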
df['labels'] = pd.Series(y_, index=df.index)
df = df.sort_values('business_id')
with open('/data/submission/inception_v3_svc_rfc_lrc_epoch3.csv', 'w') as f:
df[['business_id', 'labels']].to_csv(f, index=False)
| bzshang/yelp-photo-classification | scripts/test_ml.py | Python | apache-2.0 | 1,851 |
import re
from models.contact import Contact
def test_all_contacts_on_homepage(app, db):
if app.contact.count() == 0:
app.contact.add(Contact(first_name="Mister", last_name="Muster", mobile_phone="123", email_1="[email protected]"))
contacts_from_homepage = sorted(app.contact.get_contact_list(), key = Contact.contact_id_or_max)
contacts_from_db = sorted(db.get_contact_list(), key = Contact.contact_id_or_max)
for i in range(len(contacts_from_homepage)):
hp_contact=contacts_from_homepage[i]
db_contact=contacts_from_db[i]
assert hp_contact.first_name == db_contact.first_name
assert hp_contact.last_name == db_contact.last_name
assert clear_address(hp_contact.address) == clear_address(db_contact.address)
assert clear_phone(hp_contact.all_phones_homepage) == clear_phone(merge_phones_homepage(db_contact))
assert hp_contact.all_emails_homepage == merge_emails_homepage(db_contact)
print("Successfully verified %s contacts vs Database" % str(len(contacts_from_homepage)))
"""def test_contact_on_homepage(app):
if app.contact.count() == 0:
app.contact.add(Contact(first_name="Mister", last_name="Muster", mobile_phone="123", email_1="[email protected]"))
index = randrange(len(app.contact.get_contact_list()))
contact_from_homepage = app.contact.get_contact_list()[index]
contact_from_editpage = app.contact.get_contact_data_editpage(index)
assert contact_from_homepage.first_name == contact_from_editpage.first_name
assert contact_from_homepage.last_name == contact_from_editpage.last_name
assert contact_from_homepage.address == contact_from_editpage.address
assert contact_from_homepage.all_phones_homepage == merge_phones_homepage(contact_from_editpage)
assert contact_from_homepage.all_emails_homepage == merge_emails_homepage(contact_from_editpage)"""
"""def test_phones_on_viewpage(app):
contact_from_viewpage = app.contact.get_contact_data_viewpage(0)
contact_from_editpage = app.contact.get_contact_data_editpage(0)
assert contact_from_viewpage.home_phone == contact_from_editpage.home_phone
assert contact_from_viewpage.work_phone == contact_from_editpage.work_phone
assert contact_from_viewpage.mobile_phone == contact_from_editpage.mobile_phone
assert contact_from_viewpage.fax == contact_from_editpage.fax"""
def clear(s):
#return "".join(symbol for symbol in s if symbol not in "[]()- 0")
return re.sub("[- ()]", "", s)
def clear_phone(number):
return re.sub("0", "", number)
def clear_address(address):
return re.sub("[\n\r\s+]", "", address)
def merge_phones_homepage(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home_phone, contact.mobile_phone, contact.work_phone]))))
def merge_emails_homepage(contact):
return "\n".join(filter(lambda x: x != "", filter(lambda x: x is not None,
[contact.email_1, contact.email_2, contact.email_3])))
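# Illustrative sketch (not part of the original tests): what the helpers above
# produce for a made-up contact, assuming the Contact model accepts these
# keyword fields like in the add() call above.
#
#   contact = Contact(first_name="Mister", last_name="Muster",
#                     home_phone="111-222", mobile_phone="+7 (903) 123",
#                     work_phone=None, email_1="[email protected]",
#                     email_2="", email_3=None)
#   merge_phones_homepage(contact)  # -> "111222\n+7903123"
#   merge_emails_homepage(contact)  # -> "[email protected]"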
| rgurevych/python_for_testers | tests/test_contacts_data_compliance.py | Python | apache-2.0 | 3,163 |
"""Shared class to maintain Plex server instances."""
import logging
import ssl
import time
from urllib.parse import urlparse
from plexapi.client import PlexClient
from plexapi.exceptions import BadRequest, NotFound, Unauthorized
import plexapi.myplex
import plexapi.playqueue
import plexapi.server
from requests import Session
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.media_player.const import (
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_VIDEO,
)
from homeassistant.const import CONF_CLIENT_ID, CONF_TOKEN, CONF_URL, CONF_VERIFY_SSL
from homeassistant.core import callback
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_USE_EPISODE_ART,
DEBOUNCE_TIMEOUT,
DEFAULT_VERIFY_SSL,
DOMAIN,
GDM_DEBOUNCER,
GDM_SCANNER,
PLAYER_SOURCE,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
PLEXTV_THROTTLE,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import (
MediaNotFound,
NoServersFound,
ServerNotSpecified,
ShouldUpdateConfigEntry,
)
from .media_search import lookup_movie, lookup_music, lookup_tv
from .models import PlexSession
_LOGGER = logging.getLogger(__name__)
# Set default headers sent by plexapi
plexapi.X_PLEX_DEVICE_NAME = X_PLEX_DEVICE_NAME
plexapi.X_PLEX_PLATFORM = X_PLEX_PLATFORM
plexapi.X_PLEX_PRODUCT = X_PLEX_PRODUCT
plexapi.X_PLEX_VERSION = X_PLEX_VERSION
class PlexServer:
"""Manages a single Plex server connection."""
def __init__(
self, hass, server_config, known_server_id=None, options=None, entry_id=None
):
"""Initialize a Plex server instance."""
self.hass = hass
self.entry_id = entry_id
self.active_sessions = {}
self._plex_account = None
self._plex_server = None
self._created_clients = set()
self._known_clients = set()
self._known_idle = set()
self._url = server_config.get(CONF_URL)
self._token = server_config.get(CONF_TOKEN)
self._server_name = server_config.get(CONF_SERVER)
self._verify_ssl = server_config.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL)
self._server_id = known_server_id
self.options = options
self.server_choice = None
self._accounts = []
self._owner_username = None
self._plextv_clients = None
self._plextv_client_timestamp = 0
self._client_device_cache = {}
self._use_plex_tv = self._token is not None
self._version = None
self.async_update_platforms = Debouncer(
hass,
_LOGGER,
cooldown=DEBOUNCE_TIMEOUT,
immediate=True,
function=self._async_update_platforms,
).async_call
self.thumbnail_cache = {}
# Header conditionally added as it is not available in config entry v1
if CONF_CLIENT_ID in server_config:
plexapi.X_PLEX_IDENTIFIER = server_config[CONF_CLIENT_ID]
plexapi.myplex.BASE_HEADERS = plexapi.reset_base_headers()
plexapi.server.BASE_HEADERS = plexapi.reset_base_headers()
@property
def account(self):
"""Return a MyPlexAccount instance."""
if not self._plex_account and self._use_plex_tv:
try:
self._plex_account = plexapi.myplex.MyPlexAccount(token=self._token)
except (BadRequest, Unauthorized):
self._use_plex_tv = False
_LOGGER.error("Not authorized to access plex.tv with provided token")
raise
return self._plex_account
def plextv_clients(self):
"""Return available clients linked to Plex account."""
if self.account is None:
return []
now = time.time()
if now - self._plextv_client_timestamp > PLEXTV_THROTTLE:
self._plextv_client_timestamp = now
self._plextv_clients = [
x
for x in self.account.resources()
if "player" in x.provides and x.presence and x.publicAddressMatches
]
_LOGGER.debug(
"Current available clients from plex.tv: %s", self._plextv_clients
)
return self._plextv_clients
def connect(self):
"""Connect to a Plex server directly, obtaining direct URL if necessary."""
config_entry_update_needed = False
def _connect_with_token():
all_servers = [
x for x in self.account.resources() if "server" in x.provides
]
servers = [x for x in all_servers if x.presence] or all_servers
available_servers = [(x.name, x.clientIdentifier) for x in servers]
if not available_servers:
raise NoServersFound
if not self._server_name and len(available_servers) > 1:
raise ServerNotSpecified(available_servers)
self.server_choice = (
self._server_name if self._server_name else available_servers[0][0]
)
self._plex_server = self.account.resource(self.server_choice).connect(
timeout=10
)
def _connect_with_url():
session = None
if self._url.startswith("https") and not self._verify_ssl:
session = Session()
session.verify = False
self._plex_server = plexapi.server.PlexServer(
self._url, self._token, session
)
def _update_plexdirect_hostname():
matching_servers = [
x.name
for x in self.account.resources()
if x.clientIdentifier == self._server_id
]
if matching_servers:
self._plex_server = self.account.resource(matching_servers[0]).connect(
timeout=10
)
return True
_LOGGER.error("Attempt to update plex.direct hostname failed")
return False
if self._url:
try:
_connect_with_url()
except requests.exceptions.SSLError as error:
while error and not isinstance(error, ssl.SSLCertVerificationError):
error = error.__context__
if isinstance(error, ssl.SSLCertVerificationError):
domain = urlparse(self._url).netloc.split(":")[0]
if domain.endswith("plex.direct") and error.args[0].startswith(
f"hostname '{domain}' doesn't match"
):
_LOGGER.warning(
"Plex SSL certificate's hostname changed, updating"
)
if _update_plexdirect_hostname():
config_entry_update_needed = True
else:
raise Unauthorized( # pylint: disable=raise-missing-from
"New certificate cannot be validated with provided token"
)
else:
raise
else:
raise
else:
_connect_with_token()
try:
system_accounts = self._plex_server.systemAccounts()
shared_users = self.account.users() if self.account else []
except Unauthorized:
_LOGGER.warning(
"Plex account has limited permissions, shared account filtering will not be available"
)
else:
self._accounts = []
for user in shared_users:
for shared_server in user.servers:
if shared_server.machineIdentifier == self.machine_identifier:
self._accounts.append(user.title)
_LOGGER.debug("Linked accounts: %s", self.accounts)
owner_account = next(
(account.name for account in system_accounts if account.accountID == 1),
None,
)
if owner_account:
self._owner_username = owner_account
self._accounts.append(owner_account)
_LOGGER.debug("Server owner found: '%s'", self._owner_username)
self._version = self._plex_server.version
if config_entry_update_needed:
raise ShouldUpdateConfigEntry
@callback
def async_refresh_entity(self, machine_identifier, device, session, source):
"""Forward refresh dispatch to media_player."""
unique_id = f"{self.machine_identifier}:{machine_identifier}"
_LOGGER.debug("Refreshing %s", unique_id)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL.format(unique_id),
device,
session,
source,
)
async def async_update_session(self, payload):
"""Process a session payload received from a websocket callback."""
session_payload = payload["PlaySessionStateNotification"][0]
if (state := session_payload["state"]) == "buffering":
return
session_key = int(session_payload["sessionKey"])
offset = int(session_payload["viewOffset"])
rating_key = int(session_payload["ratingKey"])
unique_id, active_session = next(
(
(unique_id, session)
for unique_id, session in self.active_sessions.items()
if session.session_key == session_key
),
(None, None),
)
if not active_session:
await self.async_update_platforms()
return
if state == "stopped":
self.active_sessions.pop(unique_id, None)
else:
active_session.state = state
active_session.media_position = offset
def update_with_new_media():
"""Update an existing session with new media details."""
media = self.fetch_item(rating_key)
active_session.update_media(media)
if active_session.media_content_id != rating_key and state in (
"playing",
"paused",
):
await self.hass.async_add_executor_job(update_with_new_media)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL.format(unique_id),
state,
)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.machine_identifier),
)
def _fetch_platform_data(self):
"""Fetch all data from the Plex server in a single method."""
return (
self._plex_server.clients(),
self._plex_server.sessions(),
self.plextv_clients(),
)
async def _async_update_platforms(self): # noqa: C901
"""Update the platform entities."""
_LOGGER.debug("Updating devices")
await self.hass.data[DOMAIN][GDM_DEBOUNCER]()
available_clients = {}
ignored_clients = set()
new_clients = set()
monitored_users = self.accounts
known_accounts = set(self.option_monitored_users)
if known_accounts:
monitored_users = {
user
for user in self.option_monitored_users
if self.option_monitored_users[user]["enabled"]
}
if not self.option_ignore_new_shared_users:
for new_user in self.accounts - known_accounts:
monitored_users.add(new_user)
try:
devices, sessions, plextv_clients = await self.hass.async_add_executor_job(
self._fetch_platform_data
)
except plexapi.exceptions.Unauthorized:
_LOGGER.debug(
"Token has expired for '%s', reloading integration", self.friendly_name
)
await self.hass.config_entries.async_reload(self.entry_id)
return
except (
plexapi.exceptions.BadRequest,
requests.exceptions.RequestException,
) as ex:
_LOGGER.error(
"Could not connect to Plex server: %s (%s)", self.friendly_name, ex
)
return
def process_device(source, device):
self._known_idle.discard(device.machineIdentifier)
available_clients.setdefault(device.machineIdentifier, {"device": device})
available_clients[device.machineIdentifier].setdefault(
PLAYER_SOURCE, source
)
if (
device.machineIdentifier not in ignored_clients
and self.option_ignore_plexweb_clients
and device.product == "Plex Web"
):
ignored_clients.add(device.machineIdentifier)
if device.machineIdentifier not in self._known_clients:
_LOGGER.debug(
"Ignoring %s %s: %s",
"Plex Web",
source,
device.machineIdentifier,
)
return
if device.machineIdentifier not in (
self._created_clients | ignored_clients | new_clients
):
new_clients.add(device.machineIdentifier)
_LOGGER.debug(
"New %s from %s: %s",
device.product,
source,
device.machineIdentifier,
)
def connect_to_client(source, baseurl, machine_identifier, name="Unknown"):
"""Connect to a Plex client and return a PlexClient instance."""
try:
client = PlexClient(
server=self._plex_server,
baseurl=baseurl,
identifier=machine_identifier,
token=self._plex_server.createToken(),
)
except (NotFound, requests.exceptions.ConnectionError):
_LOGGER.error(
"Direct client connection failed, will try again: %s (%s)",
name,
baseurl,
)
except Unauthorized:
_LOGGER.error(
"Direct client connection unauthorized, ignoring: %s (%s)",
name,
baseurl,
)
self._client_device_cache[machine_identifier] = None
else:
self._client_device_cache[client.machineIdentifier] = client
process_device(source, client)
def connect_to_resource(resource):
"""Connect to a plex.tv resource and return a Plex client."""
try:
client = resource.connect(timeout=3)
_LOGGER.debug("Resource connection successful to plex.tv: %s", client)
except NotFound:
_LOGGER.error(
"Resource connection failed to plex.tv: %s", resource.name
)
else:
client.proxyThroughServer(value=False, server=self._plex_server)
self._client_device_cache[client.machineIdentifier] = client
process_device("plex.tv", client)
def connect_new_clients():
"""Create connections to newly discovered clients."""
for gdm_entry in self.hass.data[DOMAIN][GDM_SCANNER].entries:
machine_identifier = gdm_entry["data"]["Resource-Identifier"]
if machine_identifier in self._client_device_cache:
client = self._client_device_cache[machine_identifier]
if client is not None:
process_device("GDM", client)
elif machine_identifier not in available_clients:
baseurl = (
f"http://{gdm_entry['from'][0]}:{gdm_entry['data']['Port']}"
)
name = gdm_entry["data"]["Name"]
connect_to_client("GDM", baseurl, machine_identifier, name)
for plextv_client in plextv_clients:
if plextv_client.clientIdentifier in self._client_device_cache:
client = self._client_device_cache[plextv_client.clientIdentifier]
if client is not None:
process_device("plex.tv", client)
elif plextv_client.clientIdentifier not in available_clients:
connect_to_resource(plextv_client)
def process_sessions():
live_session_keys = {x.sessionKey for x in sessions}
for unique_id, session in list(self.active_sessions.items()):
if session.session_key not in live_session_keys:
_LOGGER.debug("Purging unknown session: %s", session.session_key)
self.active_sessions.pop(unique_id)
for session in sessions:
if session.TYPE == "photo":
_LOGGER.debug("Photo session detected, skipping: %s", session)
continue
session_username = session.usernames[0]
for player in session.players:
unique_id = f"{self.machine_identifier}:{player.machineIdentifier}"
if unique_id not in self.active_sessions:
_LOGGER.debug("Creating new Plex session: %s", session)
self.active_sessions[unique_id] = PlexSession(self, session)
if session_username and session_username not in monitored_users:
ignored_clients.add(player.machineIdentifier)
_LOGGER.debug(
"Ignoring %s client owned by '%s'",
player.product,
session_username,
)
continue
process_device("session", player)
available_clients[player.machineIdentifier][
"session"
] = self.active_sessions[unique_id]
for device in devices:
process_device("PMS", device)
def sync_tasks():
connect_new_clients()
process_sessions()
await self.hass.async_add_executor_job(sync_tasks)
new_entity_configs = []
for client_id, client_data in available_clients.items():
if client_id in ignored_clients:
continue
if client_id in new_clients:
new_entity_configs.append(client_data)
self._created_clients.add(client_id)
else:
self.async_refresh_entity(
client_id,
client_data["device"],
client_data.get("session"),
client_data.get(PLAYER_SOURCE),
)
self._known_clients.update(new_clients | ignored_clients)
idle_clients = (
self._known_clients - self._known_idle - ignored_clients
).difference(available_clients)
for client_id in idle_clients:
self.async_refresh_entity(client_id, None, None, None)
self._known_idle.add(client_id)
self._client_device_cache.pop(client_id, None)
if new_entity_configs:
async_dispatcher_send(
self.hass,
PLEX_NEW_MP_SIGNAL.format(self.machine_identifier),
new_entity_configs,
)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.machine_identifier),
)
@property
def plex_server(self):
"""Return the plexapi PlexServer instance."""
return self._plex_server
@property
def has_token(self):
"""Return if a token is used to connect to this Plex server."""
return self._token is not None
@property
def accounts(self):
"""Return accounts associated with the Plex server."""
return set(self._accounts)
@property
def owner(self):
"""Return the Plex server owner username."""
return self._owner_username
@property
def version(self):
"""Return the version of the Plex server."""
return self._version
@property
def friendly_name(self):
"""Return name of connected Plex server."""
return self._plex_server.friendlyName
@property
def machine_identifier(self):
"""Return unique identifier of connected Plex server."""
return self._plex_server.machineIdentifier
@property
def url_in_use(self):
"""Return URL used for connected Plex server."""
return self._plex_server._baseurl # pylint: disable=protected-access
@property
def option_ignore_new_shared_users(self):
"""Return ignore_new_shared_users option."""
return self.options[MP_DOMAIN].get(CONF_IGNORE_NEW_SHARED_USERS, False)
@property
def option_use_episode_art(self):
"""Return use_episode_art option."""
return self.options[MP_DOMAIN].get(CONF_USE_EPISODE_ART, False)
@property
def option_monitored_users(self):
"""Return dict of monitored users option."""
return self.options[MP_DOMAIN].get(CONF_MONITORED_USERS, {})
@property
def option_ignore_plexweb_clients(self):
"""Return ignore_plex_web_clients option."""
return self.options[MP_DOMAIN].get(CONF_IGNORE_PLEX_WEB_CLIENTS, False)
@property
def library(self):
"""Return library attribute from server object."""
return self._plex_server.library
def playlist(self, title):
"""Return playlist from server object."""
return self._plex_server.playlist(title)
def playlists(self):
"""Return available playlists from server object."""
return self._plex_server.playlists()
def create_playqueue(self, media, **kwargs):
"""Create playqueue on Plex server."""
return plexapi.playqueue.PlayQueue.create(self._plex_server, media, **kwargs)
def get_playqueue(self, playqueue_id):
"""Retrieve existing playqueue from Plex server."""
return plexapi.playqueue.PlayQueue.get(self._plex_server, playqueue_id)
def fetch_item(self, item):
"""Fetch item from Plex server."""
return self._plex_server.fetchItem(item)
def lookup_media(self, media_type, **kwargs):
"""Lookup a piece of media."""
media_type = media_type.lower()
if isinstance(kwargs.get("plex_key"), int):
key = kwargs["plex_key"]
try:
return self.fetch_item(key)
except NotFound:
_LOGGER.error("Media for key %s not found", key)
return None
if media_type == MEDIA_TYPE_PLAYLIST:
try:
playlist_name = kwargs["playlist_name"]
return self.playlist(playlist_name)
except KeyError:
_LOGGER.error("Must specify 'playlist_name' for this search")
return None
except NotFound:
_LOGGER.error(
"Playlist '%s' not found",
playlist_name,
)
return None
try:
library_name = kwargs.pop("library_name")
library_section = self.library.section(library_name)
except KeyError:
_LOGGER.error("Must specify 'library_name' for this search")
return None
except NotFound:
_LOGGER.error("Library '%s' not found", library_name)
return None
try:
if media_type == MEDIA_TYPE_EPISODE:
return lookup_tv(library_section, **kwargs)
if media_type == MEDIA_TYPE_MOVIE:
return lookup_movie(library_section, **kwargs)
if media_type == MEDIA_TYPE_MUSIC:
return lookup_music(library_section, **kwargs)
if media_type == MEDIA_TYPE_VIDEO:
# Legacy method for compatibility
try:
video_name = kwargs["video_name"]
return library_section.get(video_name)
except KeyError:
_LOGGER.error("Must specify 'video_name' for this search")
return None
except NotFound as err:
raise MediaNotFound(f"Video {video_name}") from err
except MediaNotFound as failed_item:
_LOGGER.error("%s not found in %s", failed_item, library_name)
return None
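    # Illustrative sketch (not part of the integration): the lookup paths
    # handled above, with made-up identifiers. `server` stands for a connected
    # PlexServer instance.
    #
    #   server.lookup_media(MEDIA_TYPE_MOVIE, plex_key=12345)
    #   server.lookup_media(MEDIA_TYPE_PLAYLIST, playlist_name="Party Mix")
    #   # other media types additionally need library_name plus the kwargs
    #   # understood by lookup_tv/lookup_movie/lookup_music in media_search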
@property
def sensor_attributes(self):
"""Return active session information for use in activity sensor."""
return {x.sensor_user: x.sensor_title for x in self.active_sessions.values()}
| lukas-hetzenecker/home-assistant | homeassistant/components/plex/server.py | Python | apache-2.0 | 25,421 |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dense Prediction Cell class that can be evolved in semantic segmentation.
DensePredictionCell is used as a `layer` in semantic segmentation whose
architecture is determined by the `config`, a dictionary specifying
the architecture.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from third_party.deeplab.core import utils
slim = contrib_slim
# Local constants.
_META_ARCHITECTURE_SCOPE = 'meta_architecture'
_CONCAT_PROJECTION_SCOPE = 'concat_projection'
_OP = 'op'
_CONV = 'conv'
_PYRAMID_POOLING = 'pyramid_pooling'
_KERNEL = 'kernel'
_RATE = 'rate'
_GRID_SIZE = 'grid_size'
_TARGET_SIZE = 'target_size'
_INPUT = 'input'
def dense_prediction_cell_hparams():
"""DensePredictionCell HParams.
Returns:
A dictionary of hyper-parameters used for dense prediction cell with keys:
- reduction_size: Integer, the number of output filters for each operation
inside the cell.
- dropout_on_concat_features: Boolean, apply dropout on the concatenated
features or not.
- dropout_on_projection_features: Boolean, apply dropout on the projection
features or not.
- dropout_keep_prob: Float, when `dropout_on_concat_features' or
`dropout_on_projection_features' is True, the `keep_prob` value used
in the dropout operation.
- concat_channels: Integer, the concatenated features will be
channel-reduced to `concat_channels` channels.
- conv_rate_multiplier: Integer, used to multiply the convolution rates.
This is useful in the case when the output_stride is changed from 16
to 8, we need to double the convolution rates correspondingly.
"""
return {
'reduction_size': 256,
'dropout_on_concat_features': True,
'dropout_on_projection_features': False,
'dropout_keep_prob': 0.9,
'concat_channels': 256,
'conv_rate_multiplier': 1,
}
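# Illustrative sketch (not part of the original module): callers override a
# subset of these defaults by passing `hparams` to DensePredictionCell below;
# unspecified keys keep the values returned here.
#
#   cell = DensePredictionCell(config, hparams={'conv_rate_multiplier': 2})
#   # e.g. double the convolution rates when output_stride drops from 16 to 8;
#   # cell.hparams['reduction_size'] is still the default 256.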
class DensePredictionCell(object):
"""DensePredictionCell class used as a 'layer' in semantic segmentation."""
def __init__(self, config, hparams=None):
"""Initializes the dense prediction cell.
Args:
config: A dictionary storing the architecture of a dense prediction cell.
hparams: A dictionary of hyper-parameters, provided by users. This
dictionary will be used to update the default dictionary returned by
dense_prediction_cell_hparams().
Raises:
ValueError: If `conv_rate_multiplier` has value < 1.
"""
self.hparams = dense_prediction_cell_hparams()
if hparams is not None:
self.hparams.update(hparams)
self.config = config
# Check values in hparams are valid or not.
if self.hparams['conv_rate_multiplier'] < 1:
raise ValueError('conv_rate_multiplier cannot have value < 1.')
def _get_pyramid_pooling_arguments(
self, crop_size, output_stride, image_grid, image_pooling_crop_size=None):
"""Gets arguments for pyramid pooling.
Args:
crop_size: A list of two integers, [crop_height, crop_width] specifying
whole patch crop size.
output_stride: Integer, output stride value for extracted features.
image_grid: A list of two integers, [image_grid_height, image_grid_width],
specifying the grid size of how the pyramid pooling will be performed.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
Returns:
A list of (resize_value, pooled_kernel)
"""
resize_height = utils.scale_dimension(crop_size[0], 1. / output_stride)
resize_width = utils.scale_dimension(crop_size[1], 1. / output_stride)
# If image_pooling_crop_size is not specified, use crop_size.
if image_pooling_crop_size is None:
image_pooling_crop_size = crop_size
pooled_height = utils.scale_dimension(
image_pooling_crop_size[0], 1. / (output_stride * image_grid[0]))
pooled_width = utils.scale_dimension(
image_pooling_crop_size[1], 1. / (output_stride * image_grid[1]))
return ([resize_height, resize_width], [pooled_height, pooled_width])
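  # Illustrative sketch (not part of the original module), assuming
  # utils.scale_dimension follows the usual DeepLab convention of
  # int((dim - 1) * scale + 1):
  #
  #   crop_size = [513, 513], output_stride = 16, image_grid = [2, 2]
  #   resize -> [33, 33]    # (513 - 1) / 16 + 1
  #   pooled -> [17, 17]    # (513 - 1) / (16 * 2) + 1
  #   returns ([33, 33], [17, 17])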
def _parse_operation(self, config, crop_size, output_stride,
image_pooling_crop_size=None):
"""Parses one operation.
When 'operation' is 'pyramid_pooling', we compute the required
hyper-parameters and save in config.
Args:
config: A dictionary storing required hyper-parameters for one
operation.
crop_size: A list of two integers, [crop_height, crop_width] specifying
whole patch crop size.
output_stride: Integer, output stride value for extracted features.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
Returns:
A dictionary stores the related information for the operation.
"""
if config[_OP] == _PYRAMID_POOLING:
(config[_TARGET_SIZE],
config[_KERNEL]) = self._get_pyramid_pooling_arguments(
crop_size=crop_size,
output_stride=output_stride,
image_grid=config[_GRID_SIZE],
image_pooling_crop_size=image_pooling_crop_size)
return config
def build_cell(self,
features,
output_stride=16,
crop_size=None,
image_pooling_crop_size=None,
weight_decay=0.00004,
reuse=None,
is_training=False,
fine_tune_batch_norm=False,
scope=None):
"""Builds the dense prediction cell based on the config.
Args:
features: Input feature map of size [batch, height, width, channels].
output_stride: Int, output stride at which the features were extracted.
crop_size: A list [crop_height, crop_width], determining the input
features resolution.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
weight_decay: Float, the weight decay for model variables.
reuse: Reuse the model variables or not.
is_training: Boolean, is training or not.
fine_tune_batch_norm: Boolean, fine-tuning batch norm parameters or not.
scope: Optional string, specifying the variable scope.
Returns:
Features after passing through the constructed dense prediction cell with
shape = [batch, height, width, channels] where channels are determined
by `reduction_size` returned by dense_prediction_cell_hparams().
Raises:
ValueError: Use Convolution with kernel size not equal to 1x1 or 3x3 or
the operation is not recognized.
"""
batch_norm_params = {
'is_training': is_training and fine_tune_batch_norm,
'decay': 0.9997,
'epsilon': 1e-5,
'scale': True,
}
hparams = self.hparams
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
padding='SAME',
stride=1,
reuse=reuse):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with tf.variable_scope(scope, _META_ARCHITECTURE_SCOPE, [features]):
depth = hparams['reduction_size']
branch_logits = []
for i, current_config in enumerate(self.config):
scope = 'branch%d' % i
current_config = self._parse_operation(
config=current_config,
crop_size=crop_size,
output_stride=output_stride,
image_pooling_crop_size=image_pooling_crop_size)
tf.logging.info(current_config)
if current_config[_INPUT] < 0:
operation_input = features
else:
operation_input = branch_logits[current_config[_INPUT]]
if current_config[_OP] == _CONV:
if current_config[_KERNEL] == [1, 1] or current_config[
_KERNEL] == 1:
branch_logits.append(
slim.conv2d(operation_input, depth, 1, scope=scope))
else:
conv_rate = [r * hparams['conv_rate_multiplier']
for r in current_config[_RATE]]
branch_logits.append(
utils.split_separable_conv2d(
operation_input,
filters=depth,
kernel_size=current_config[_KERNEL],
rate=conv_rate,
weight_decay=weight_decay,
scope=scope))
elif current_config[_OP] == _PYRAMID_POOLING:
pooled_features = slim.avg_pool2d(
operation_input,
kernel_size=current_config[_KERNEL],
stride=[1, 1],
padding='VALID')
pooled_features = slim.conv2d(
pooled_features,
depth,
1,
scope=scope)
pooled_features = tf.image.resize_bilinear(
pooled_features,
current_config[_TARGET_SIZE],
align_corners=True)
# Set shape for resize_height/resize_width if they are not Tensor.
resize_height = current_config[_TARGET_SIZE][0]
resize_width = current_config[_TARGET_SIZE][1]
if isinstance(resize_height, tf.Tensor):
resize_height = None
if isinstance(resize_width, tf.Tensor):
resize_width = None
pooled_features.set_shape(
[None, resize_height, resize_width, depth])
branch_logits.append(pooled_features)
else:
raise ValueError('Unrecognized operation.')
# Merge branch logits.
concat_logits = tf.concat(branch_logits, 3)
if self.hparams['dropout_on_concat_features']:
concat_logits = slim.dropout(
concat_logits,
keep_prob=self.hparams['dropout_keep_prob'],
is_training=is_training,
scope=_CONCAT_PROJECTION_SCOPE + '_dropout')
concat_logits = slim.conv2d(concat_logits,
self.hparams['concat_channels'],
1,
scope=_CONCAT_PROJECTION_SCOPE)
if self.hparams['dropout_on_projection_features']:
concat_logits = slim.dropout(
concat_logits,
keep_prob=self.hparams['dropout_keep_prob'],
is_training=is_training,
scope=_CONCAT_PROJECTION_SCOPE + '_dropout')
return concat_logits
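# --- Illustrative usage sketch (added note; not part of the original file) ---
# A minimal, hypothetical example of driving build_cell() inside a TF1/slim
# graph. The class name `DensePredictionCell` and its constructor arguments
# are assumptions, since only the methods above are visible here; hparams are
# assumed to default to dense_prediction_cell_hparams() per the docstring.
#
#   import tensorflow as tf
#
#   features = tf.placeholder(tf.float32, [None, 33, 33, 2048])
#   cell = DensePredictionCell(config=cell_config, hparams=None)  # hypothetical
#   concat_logits = cell.build_cell(
#       features,
#       output_stride=16,
#       crop_size=[513, 513],
#       image_pooling_crop_size=[513, 513],
#       is_training=True,
#       fine_tune_batch_norm=False,
#       scope='dense_prediction_cell')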
| googleinterns/wss | third_party/deeplab/core/dense_prediction_cell.py | Python | apache-2.0 | 12,180 |
import asyncore, socket, logging, time, asynchat, os
from hdfs_space_common import get_tree_from_cache, get_child_node, TreeNode
FORMAT = '%(asctime)-15s: %(levelname)s %(module)s - %(funcName)s: %(message)s'
logging.basicConfig(format=FORMAT, level=logging.WARNING)
class ChatHandler(asynchat.async_chat):
def __init__(self, sock):
asynchat.async_chat.__init__(self, sock = sock)
self.ibuffer = []
self.obuffer = ''
self.set_terminator("\n")
def collect_incoming_data(self, data):
self.ibuffer.append(data)
logging.info('Received data "%s"' % data)
def found_terminator(self):
self.handle_request()
def handle_request(self):
data = self.ibuffer.pop(0)
#Data should be like:
#metric:path|user|size
# OR
#db:new_path
command = data.split(":")[0]
if command == 'metric':
metric_args = data.split(":")[1].split('|')
hdfs_path = metric_args[0] if len(metric_args) > 0 else "/"
user_name = metric_args[1] if len(metric_args) > 1 else "ALL"
metric = metric_args[2] if len(metric_args) > 2 else "size"
logging.debug('metric_args: %s' % metric_args)
logging.debug('hdfs_path: %s' % hdfs_path)
logging.debug('user_name: %s' % user_name)
logging.debug('metric: %s' % metric)
result = 0
if user_name == "ALL" and metric == 'size':
logging.warning('Rather than using this script, try the command "hdfs dfs -du /"')
elif user_name == "ALL" and metric == 'amount':
logging.info('Calculating the metric')
result = get_child_node(file_tree, hdfs_path).get_amount_for_all()
else:
if metric == "size":
logging.info('Calculating the metric')
result = get_child_node(file_tree, hdfs_path).get_size_by_user(user_name)
elif metric == "amount":
logging.info('Calculating the metric')
result = get_child_node(file_tree, hdfs_path).get_amount_by_user(user_name)
else:
logging.warning("The metric %s not implemented yet" % metric)
logging.info('The result is ready: %s. Pushing it to back' % result)
self.push(str(result))
return
elif command == 'db':
file_path = data.split(":")[1]
if os.path.exists(file_path):
global file_tree
file_tree = get_tree_from_cache(file_path)
os.rename(file_path,MetricServer.db_path)
logging.info('File %s renamed to %s' % (file_path, MetricServer.db_path))
self.push('OK')
else:
logging.warning('File %s could not be found. Doing nothing' % file_path)
self.push('FAIL')
else:
logging.warning("The command %s not implemented yet")
self.push('FAIL')
class MetricServer(asyncore.dispatcher):
sock_path = '/tmp/hdfs_space.sock'
db_path = '/tmp/hdfs_space.data'
def __init__(self):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind(self.sock_path)
logging.info('Starting metric-server')
self.listen(5)
global file_tree
try:
file_tree = get_tree_from_cache(self.db_path)
except KeyError as e:
file_tree = TreeNode('')
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
logging.info('Incoming connection')
handler = ChatHandler(sock)
def handle_close(self):
self.close()
logging.info('The socket is closed')
def handle_expt(self):
logging.info("OOB detected for %s" % self)
if __name__ == '__main__':
file_tree = None
server = MetricServer()
try:
asyncore.loop()
finally:
if os.path.exists(server.sock_path):
os.unlink(server.sock_path)
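# --- Illustrative client sketch (added note; not part of the original script) ---
# The server above listens on a UNIX socket and expects newline-terminated
# requests of the form "metric:<hdfs_path>|<user>|<size|amount>" or
# "db:<new_cache_path>", replying with the computed value (or OK/FAIL).
# The HDFS path and user name below are hypothetical placeholders.
#
#   import socket
#
#   client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
#   client.connect('/tmp/hdfs_space.sock')
#   client.sendall('metric:/user/example|example_user|amount\n')
#   print client.recv(1024)  # the metric value as a string
#   client.close()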
| arshvin/scripts | zabbix/T_hdfs_space_checker/hdfs_space_metric_server.py | Python | apache-2.0 | 3,517 |
from nltk.tokenize import sent_tokenize,word_tokenize
from nltk.corpus import stopwords
from collections import defaultdict
from string import punctuation
from heapq import nlargest
import re
"""
Modified from http://glowingpython.blogspot.co.uk/2014/09/text-summarization-with-nltk.html
"""
class FrequencySummarizer:
def __init__(self, low_thresh=0.1, high_thresh=0.9):
"""
Initialize the text summarizer.
Words that have a frequency term lower than low_thresh
or higher than high_thresh will be ignored.
"""
ignore = ['fig','figure','ibid', 'et al','cf','NB','N.B.']
self._low_thresh = low_thresh
self._high_thresh = high_thresh
self._stopwords = set(stopwords.words('english') + list(punctuation) + list(ignore))
def _compute_frequencies(self, word_tk):
freq = defaultdict(int)
for s in word_tk:
for word in s:
if word not in self._stopwords:
freq[word] += 1
# frequency normalization and filtering
m = float(max(freq.values()))
for w in freq.keys():
freq[w] = freq[w]/m
if freq[w] >= self._high_thresh or freq[w] <= self._low_thresh:
del freq[w]
return freq
def summarize(self, text, n):
"""
Return a list of n sentences
which represent the summary of text.
"""
text = "".join([unicode(x) for x in text])
sents = sent_tokenize(text)
if n > len(sents):
n = len(sents)
word_tk = [word_tokenize(s.lower()) for s in sents]
self._freq = self._compute_frequencies(word_tk)
ranking = defaultdict(int)
for i,sent in enumerate(word_tk):
for w in sent:
if w in self._freq and len(w)>4: #Only count words of length>4 as significant
ranking[i] += self._freq[w]
sentsindx = self._rank(ranking, n)
return [sents[j].encode('ascii', errors='backslashreplace') for j in sentsindx]
def _rank(self, ranking, n):
""" return the first n sentences with highest ranking """
return nlargest(n, ranking, key=ranking.get)
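# --- Illustrative usage sketch (added note; not part of the original file) ---
# summarize() joins the elements of `text` before sentence-tokenizing, so any
# iterable of strings works. The sample document is hypothetical, and the
# module targets Python 2 (it relies on the built-in unicode()).
#
#   fs = FrequencySummarizer(low_thresh=0.1, high_thresh=0.9)
#   document = [u"First sentence of a long report. Second sentence with more "
#               u"detail. A third sentence repeating the important detail."]
#   for sentence in fs.summarize(document, 2):
#       print sentence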
| rebeccamorgan/easyskim | nat_proc/FrequencySummarizer.py | Python | apache-2.0 | 2,032 |
#!/usr/bin/python
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
__version__ = 'trunk'
import struct
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def IPAddress(address, version=None):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
version: An Integer, 4 or 6. If set, don't try to automatically
determine what the IP address type is. Important for things
like IPAddress(1), which could be IPv4, '0.0.0.1', or IPv6,
'::1'.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
"""
if version:
if version == 4:
return IPv4Address(address)
elif version == 6:
return IPv6Address(address)
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
def IPNetwork(address, version=None, strict=False):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
version: An Integer, if set, don't try to automatically
determine what the IP address type is. Important for things
like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or IPv6,
'::1/128'.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if a strict network was requested and a strict
network wasn't given.
"""
if version:
if version == 4:
return IPv4Network(address, strict)
elif version == 6:
return IPv6Network(address, strict)
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
def _find_address_range(addresses):
"""Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence.
"""
first = last = addresses[0]
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
else:
break
return (first, last)
def _get_prefix_length(number1, number2, bits):
"""Get the number of leading bits that are same for two numbers.
Args:
number1: an integer.
number2: another integer.
bits: the maximum number of bits to compare.
Returns:
The number of leading bits that are the same for two numbers.
"""
for i in range(bits):
if number1 >> i == number2 >> i:
return bits - i
return 0
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
for i in range(bits):
if (number >> i) % 2:
return i
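# Example (added note): 40 == 0b101000 has three trailing zero bits, so
# _count_righthand_zero_bits(40, 8) == 3, while two integers sharing their top
# six of eight bits give _get_prefix_length(0b10100000, 0b10100011, 8) == 6.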
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> summarize_address_range(IPv4Address('1.1.1.0'),
IPv4Address('1.1.1.130'))
[IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
IPv4Network('1.1.1.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
The address range collapsed to a list of IPv4Network's or
IPv6Network's.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version is not 4 or 6.
"""
if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" % (
str(first), str(last)))
if first > last:
raise ValueError('last IP address must be greater than first')
networks = []
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = _count_righthand_zero_bits(first_int, ip_bits)
current = None
while nbits >= 0:
addend = 2**nbits - 1
current = first_int + addend
nbits -= 1
if current <= last_int:
break
prefix = _get_prefix_length(first_int, current, ip_bits)
net = ip('%s/%d' % (str(first), prefix))
networks.append(net)
if current == ip._ALL_ONES:
break
first_int = current + 1
first = IPAddress(first_int, version=first._version)
return networks
def _collapse_address_list_recursive(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('1.1.0.0/24')
ip2 = IPv4Network('1.1.1.0/24')
ip3 = IPv4Network('1.1.2.0/24')
ip4 = IPv4Network('1.1.3.0/24')
ip5 = IPv4Network('1.1.4.0/24')
ip6 = IPv4Network('1.1.0.1/22')
_collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
[IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
This shouldn't be called directly; it is called via
collapse_address_list([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
ret_array = []
optimized = False
for cur_addr in addresses:
if not ret_array:
ret_array.append(cur_addr)
continue
if cur_addr in ret_array[-1]:
optimized = True
elif cur_addr == ret_array[-1].supernet().subnet()[1]:
ret_array.append(ret_array.pop().supernet())
optimized = True
else:
ret_array.append(cur_addr)
if optimized:
return _collapse_address_list_recursive(ret_array)
return ret_array
def collapse_address_list(addresses):
"""Collapse a list of IP objects.
Example:
collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
[IPv4('1.1.0.0/23')]
Args:
addresses: A list of IPv4Network or IPv6Network objects.
Returns:
A list of IPv4Network or IPv6Network objects depending on what we
were passed.
Raises:
TypeError: If passed a list of mixed version objects.
"""
i = 0
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseIP):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
str(ip), str(ips[-1])))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
str(ip), str(ips[-1])))
ips.append(ip.ip)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
str(ip), str(ips[-1])))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
nets = sorted(set(nets))
while i < len(ips):
(first, last) = _find_address_range(ips[i:])
i = ips.index(last) + 1
addrs.extend(summarize_address_range(first, last))
return _collapse_address_list_recursive(sorted(
addrs + nets, key=_BaseNet._get_networks_key))
# backwards compatibility
CollapseAddrList = collapse_address_list
# Test whether this Python implementation supports byte objects that
# are not identical to str ones.
# We need to exclude platforms where bytes == str so that we can
# distinguish between packed representations and strings, for example
# b'12::' (the IPv4 address 49.50.58.58) and '12::' (an IPv6 address).
try:
_compat_has_real_bytes = bytes is not str
except NameError: # <Python2.6
_compat_has_real_bytes = False
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('1.1.1.1') <= IPv4Network('1.1.1.1/24')
doesn't make any sense. There are times, however, when you may wish
to have ipaddr sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNet):
return obj._get_networks_key()
elif isinstance(obj, _BaseIP):
return obj._get_address_key()
return NotImplemented
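# Example (added note): a mixed list of networks and addresses can be ordered
# with this key, as suggested in the docstring above:
#
#   sorted([IPv4Network('1.1.1.1/24'), IPv4Address('1.1.1.1')],
#          key=get_mixed_type_key)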
class _IPAddrBase(object):
"""The mother class."""
def __index__(self):
return self._ip
def __int__(self):
return self._ip
def __hex__(self):
return hex(self._ip)
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return str(self)
class _BaseIP(_IPAddrBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
def __init__(self, address):
if '/' in str(address):
raise AddressValueError(address)
def __eq__(self, other):
try:
return (self._ip == other._ip
and self._version == other._version)
except AttributeError:
return NotImplemented
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return NotImplemented
return not eq
def __le__(self, other):
gt = self.__gt__(other)
if gt is NotImplemented:
return NotImplemented
return not gt
def __ge__(self, other):
lt = self.__lt__(other)
if lt is NotImplemented:
return NotImplemented
return not lt
def __lt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseIP):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self._ip != other._ip:
return self._ip < other._ip
return False
def __gt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseIP):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self._ip != other._ip:
return self._ip > other._ip
return False
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def __str__(self):
return '%s' % self._string_from_ip_int(self._ip)
def __hash__(self):
return hash(hex(self._ip))
def _get_address_key(self):
return (self._version, self)
@property
def version(self):
raise NotImplementedError('BaseIP has no version')
class _BaseNet(_IPAddrBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def iterhosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
cur = int(self.network) + 1
bcast = int(self.broadcast) - 1
while cur <= bcast:
cur += 1
yield IPAddress(cur - 1, version=self._version)
def __iter__(self):
cur = int(self.network)
bcast = int(self.broadcast)
while cur <= bcast:
cur += 1
yield IPAddress(cur - 1, version=self._version)
def __getitem__(self, n):
network = int(self.network)
broadcast = int(self.broadcast)
if n >= 0:
if network + n > broadcast:
raise IndexError
return IPAddress(network + n, version=self._version)
else:
n += 1
if broadcast + n < network:
raise IndexError
return IPAddress(broadcast + n, version=self._version)
def __lt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseNet):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self.network != other.network:
return self.network < other.network
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __gt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
str(self), str(other)))
if not isinstance(other, _BaseNet):
raise TypeError('%s and %s are not of the same type' % (
str(self), str(other)))
if self.network != other.network:
return self.network > other.network
if self.netmask != other.netmask:
return self.netmask > other.netmask
return False
def __le__(self, other):
gt = self.__gt__(other)
if gt is NotImplemented:
return NotImplemented
return not gt
def __ge__(self, other):
lt = self.__lt__(other)
if lt is NotImplemented:
return NotImplemented
return not lt
def __eq__(self, other):
try:
return (self._version == other._version
and self.network == other.network
and int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return NotImplemented
return not eq
def __str__(self):
return '%s/%s' % (str(self.ip),
str(self._prefixlen))
def __hash__(self):
return hash(int(self.network) ^ int(self.netmask))
def __contains__(self, other):
# dealing with another network.
if isinstance(other, _BaseNet):
return (int(self.network) <= int(other._ip) and
int(self.broadcast) >= int(other.broadcast))
# dealing with another address
else:
return (int(self.network) <= int(other._ip) <=
int(self.broadcast))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network in other or self.broadcast in other or (
other.network in self or other.broadcast in self)
@property
def network(self):
x = self._cache.get('network')
if x is None:
x = IPAddress(self._ip & int(self.netmask), version=self._version)
self._cache['network'] = x
return x
@property
def broadcast(self):
x = self._cache.get('broadcast')
if x is None:
x = IPAddress(self._ip | int(self.hostmask), version=self._version)
self._cache['broadcast'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = IPAddress(int(self.netmask) ^ self._ALL_ONES,
version=self._version)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (str(self.ip), self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (str(self.ip), str(self.netmask))
@property
def with_hostmask(self):
return '%s/%s' % (str(self.ip), str(self.hostmask))
@property
def numhosts(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast) - int(self.network) + 1
@property
def version(self):
raise NotImplementedError('BaseNet has no version')
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = IP('10.1.1.0/24')
addr2 = IP('10.1.1.0/26')
addr1.address_exclude(addr2) =
[IP('10.1.1.64/26'), IP('10.1.1.128/25')]
or IPv6:
addr1 = IP('::1/32')
addr2 = IP('::1/128')
addr1.address_exclude(addr2) = [IP('::0/128'),
IP('::2/127'),
IP('::4/126'),
IP('::8/125'),
...
IP('0:0:8000::/33')]
Args:
other: An IP object of the same type.
Returns:
A sorted list of IP objects addresses which is self minus
other.
Raises:
TypeError: If self and other are of differing address
versions.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
str(self), str(other)))
if other not in self:
raise ValueError('%s not contained in %s' % (str(other),
str(self)))
ret_addrs = []
# Make sure we're comparing the network of other.
other = IPNetwork('%s/%s' % (str(other.network), str(other.prefixlen)),
version=other._version)
s1, s2 = self.subnet()
while s1 != other and s2 != other:
if other in s1:
ret_addrs.append(s2)
s1, s2 = s1.subnet()
elif other in s2:
ret_addrs.append(s1)
s1, s2 = s2.subnet()
else:
# If we got here, there's a bug somewhere.
assert True == False, ('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(str(s1), str(s2), str(other)))
if s1 == other:
ret_addrs.append(s2)
elif s2 == other:
ret_addrs.append(s1)
else:
# If we got here, there's a bug somewhere.
assert True == False, ('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(str(s1), str(s2), str(other)))
return sorted(ret_addrs, key=_BaseNet._get_networks_key)
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4('1.1.1.0/24') < IPv4('1.1.2.0/24')
IPv6('1080::200C:417A') < IPv6('1080::200B:417B')
0 if self == other
eg: IPv4('1.1.1.1/24') == IPv4('1.1.1.2/24')
IPv6('1080::200C:417A/96') == IPv6('1080::200C:417B/96')
1 if self > other
eg: IPv4('1.1.1.0/24') > IPv4('1.1.0.0/24')
IPv6('1080::1:200C:417A/112') >
IPv6('1080::0:200C:417A/112')
If the IP versions of self and other are different, returns:
-1 if self._version < other._version
eg: IPv4('10.0.0.1/24') < IPv6('::1/128')
1 if self._version > other._version
eg: IPv6('::1/128') > IPv4('255.255.255.0/24')
"""
if self._version < other._version:
return -1
if self._version > other._version:
return 1
# self._version == other._version below here:
if self.network < other.network:
return -1
if self.network > other.network:
return 1
# self.network == other.network below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
# self.network == other.network and self.netmask == other.netmask
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network, self.netmask)
def _ip_int_from_prefix(self, prefixlen=None):
"""Turn the prefix length netmask into a int for comparison.
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
if not prefixlen and prefixlen != 0:
prefixlen = self._prefixlen
return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
def _prefix_from_ip_int(self, ip_int, mask=32):
"""Return prefix length from the decimal netmask.
Args:
ip_int: An integer, the IP address.
mask: The netmask. Defaults to 32.
Returns:
An integer, the prefix length.
"""
while mask:
if ip_int & 1 == 1:
break
ip_int >>= 1
mask -= 1
return mask
def _ip_string_from_prefix(self, prefixlen=None):
"""Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
A string, the dotted decimal netmask string.
"""
if not prefixlen:
prefixlen = self._prefixlen
return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))
def iter_subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), return a list with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if not self._is_valid_netmask(str(new_prefixlen)):
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, str(self)))
first = IPNetwork('%s/%s' % (str(self.network),
str(self._prefixlen + prefixlen_diff)),
version=self._version)
yield first
current = first
while True:
broadcast = current.broadcast
if broadcast == self.broadcast:
return
new_addr = IPAddress(int(broadcast) + 1, version=self._version)
current = IPNetwork('%s/%s' % (str(new_addr), str(new_prefixlen)),
version=self._version)
yield current
def subnet(self, prefixlen_diff=1, new_prefix=None):
"""Return a list of subnets, rather than an interator."""
return list(self.iter_subnets(prefixlen_diff, new_prefix))
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a
negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
if self.prefixlen - prefixlen_diff < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return IPNetwork('%s/%s' % (str(self.network),
str(self.prefixlen - prefixlen_diff)),
version=self._version)
# backwards compatibility
Subnet = subnet
Supernet = supernet
AddressExclude = address_exclude
CompareNetworks = compare_networks
Contains = __contains__
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2**32) - 1
def __init__(self, address):
self._version = 4
self._max_prefixlen = 32
def _explode_shorthand_ip_string(self, ip_str=None):
if not ip_str:
ip_str = str(self)
return ip_str
def _ip_int_from_string(self, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if the string isn't a valid IP string.
"""
packed_ip = 0
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError(ip_str)
for oc in octets:
try:
packed_ip = (packed_ip << 8) | int(oc)
except ValueError:
raise AddressValueError(ip_str)
return packed_ip
def _string_from_ip_int(self, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
octets = []
for _ in xrange(4):
octets.insert(0, str(ip_int & 0xFF))
ip_int >>= 8
return '.'.join(octets)
def _is_valid_ip(self, address):
"""Validate the dotted decimal notation IP/netmask string.
Args:
address: A string, either representing a quad-dotted ip
or an integer which is a valid IPv4 IP address.
Returns:
A boolean, True if the string is a valid dotted decimal IP
string.
"""
octets = address.split('.')
if len(octets) == 1:
# We have an integer rather than a dotted decimal IP.
try:
return int(address) >= 0 and int(address) <= self._ALL_ONES
except ValueError:
return False
if len(octets) != 4:
return False
for octet in octets:
try:
if not 0 <= int(octet) <= 255:
return False
except ValueError:
return False
return True
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def packed(self):
"""The binary representation of this address."""
return struct.pack('!I', self._ip)
@property
def version(self):
return self._version
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in IPv4Network('240.0.0.0/4')
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 1918.
"""
return (self in IPv4Network('10.0.0.0/8') or
self in IPv4Network('172.16.0.0/12') or
self in IPv4Network('192.168.0.0/16'))
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in IPv4Network('224.0.0.0/4')
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self in IPv4Network('0.0.0.0')
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in IPv4Network('127.0.0.0/8')
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in IPv4Network('169.254.0.0/16')
class IPv4Address(_BaseV4, _BaseIP):
"""Represent and manipulate single IPv4 Addresses."""
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
'192.168.1.1'
Additionally, an integer can be passed, so
IPv4Address('192.168.1.1') == IPv4Address(3232235777).
or, more generally
IPv4Address(int(IPv4Address('192.168.1.1'))) ==
IPv4Address('192.168.1.1')
Raises:
AddressValueError: If ipaddr isn't a valid IPv4 address.
"""
_BaseIP.__init__(self, address)
_BaseV4.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, (int, long)):
self._ip = address
if address < 0 or address > self._ALL_ONES:
raise AddressValueError(address)
return
# Constructing from a packed address
if _compat_has_real_bytes:
if isinstance(address, bytes) and len(address) == 4:
self._ip = struct.unpack('!I', address)[0]
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = str(address)
if not self._is_valid_ip(addr_str):
raise AddressValueError(addr_str)
self._ip = self._ip_int_from_string(addr_str)
class IPv4Network(_BaseV4, _BaseNet):
"""This class represents and manipulates 32-bit IPv4 networks.
Attributes: [examples for IPv4Network('1.2.3.4/27')]
._ip: 16909060
.ip: IPv4Address('1.2.3.4')
.network: IPv4Address('1.2.3.0')
.hostmask: IPv4Address('0.0.0.31')
.broadcast: IPv4Address('1.2.3.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = set((255, 254, 252, 248, 240, 224, 192, 128, 0))
def __init__(self, address, strict=False):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.168.1.1/24'
'192.168.1.1/255.255.255.0'
'192.168.1.1/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.168.1.1'
'192.168.1.1/255.255.255.255'
'192.168.1.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.168.1.1') == IPv4Network(3232235777).
or, more generally
IPv4Network(int(IPv4Network('192.168.1.1'))) ==
IPv4Network('192.168.1.1')
strict: A boolean. If true, ensure that we have been passed
A true network address, eg, 192.168.1.0/24 and not an
IP address on a network, eg, 192.168.1.1/24.
Raises:
AddressValueError: If ipaddr isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNet.__init__(self, address)
_BaseV4.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, (int, long)):
self._ip = address
self.ip = IPv4Address(self._ip)
self._prefixlen = 32
self.netmask = IPv4Address(self._ALL_ONES)
if address < 0 or address > self._ALL_ONES:
raise AddressValueError(address)
return
# Constructing from a packed address
if _compat_has_real_bytes:
if isinstance(address, bytes) and len(address) == 4:
self._ip = struct.unpack('!I', address)[0]
self.ip = IPv4Address(self._ip)
self._prefixlen = 32
self.netmask = IPv4Address(self._ALL_ONES)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = str(address).split('/')
if len(addr) > 2:
raise AddressValueError(address)
if not self._is_valid_ip(addr[0]):
raise AddressValueError(addr[0])
self._ip = self._ip_int_from_string(addr[0])
self.ip = IPv4Address(self._ip)
if len(addr) == 2:
mask = addr[1].split('.')
if len(mask) == 4:
# We have dotted decimal netmask.
if self._is_valid_netmask(addr[1]):
self.netmask = IPv4Address(self._ip_int_from_string(
addr[1]))
elif self._is_hostmask(addr[1]):
self.netmask = IPv4Address(
self._ip_int_from_string(addr[1]) ^ self._ALL_ONES)
else:
raise NetmaskValueError('%s is not a valid netmask'
% addr[1])
self._prefixlen = self._prefix_from_ip_int(int(self.netmask))
else:
# We have a netmask in prefix length form.
if not self._is_valid_netmask(addr[1]):
raise NetmaskValueError(addr[1])
self._prefixlen = int(addr[1])
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
else:
self._prefixlen = 32
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
if strict:
if self.ip != self.network:
raise ValueError('%s has host bits set' %
self.ip)
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [int(x) for x in bits if int(x) in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _is_valid_netmask(self, netmask):
"""Verify that the netmask is valid.
Args:
netmask: A string, either a prefix or dotted decimal
netmask.
Returns:
A boolean, True if the prefix represents a valid IPv4
netmask.
"""
mask = netmask.split('.')
if len(mask) == 4:
if [x for x in mask if int(x) not in self._valid_mask_octets]:
return False
if [x for idx, y in enumerate(mask) if idx > 0 and
y > mask[idx - 1]]:
return False
return True
try:
netmask = int(netmask)
except ValueError:
return False
return 0 <= netmask <= 32
# backwards compatibility
IsRFC1918 = lambda self: self.is_private
IsMulticast = lambda self: self.is_multicast
IsLoopback = lambda self: self.is_loopback
IsLinkLocal = lambda self: self.is_link_local
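# --- Illustrative usage sketch (added note; not part of the original file) ---
# Mirrors the attribute examples given in the IPv4Network docstring above.
#
#   net = IPv4Network('1.2.3.4/27')
#   net.network                      # IPv4Address('1.2.3.0')
#   net.broadcast                    # IPv4Address('1.2.3.31')
#   net.prefixlen                    # 27
#   IPv4Address('1.2.3.10') in net   # True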
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
_ALL_ONES = (2**128) - 1
def __init__(self, address):
self._version = 6
self._max_prefixlen = 128
def _ip_int_from_string(self, ip_str=None):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
A long, the IPv6 ip_str.
Raises:
AddressValueError: if ip_str isn't a valid IP Address.
"""
if not ip_str:
ip_str = str(self.ip)
ip_int = 0
fields = self._explode_shorthand_ip_string(ip_str).split(':')
# Do we have an IPv4 mapped (::ffff:a.b.c.d) or compact (::a.b.c.d)
# ip_str?
if fields[-1].count('.') == 3:
ipv4_string = fields.pop()
ipv4_int = IPv4Network(ipv4_string)._ip
octets = []
for _ in xrange(2):
octets.append(hex(ipv4_int & 0xFFFF).lstrip('0x').rstrip('L'))
ipv4_int >>= 16
fields.extend(reversed(octets))
for field in fields:
try:
ip_int = (ip_int << 16) + int(field or '0', 16)
except ValueError:
raise AddressValueError(ip_str)
return ip_int
def _compress_hextets(self, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index in range(len(hextets)):
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
def _string_from_ip_int(self, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if not ip_int and ip_int != 0:
ip_int = int(self._ip)
if ip_int > self._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = []
for x in range(0, 32, 4):
hextets.append('%x' % int(hex_str[x:x+4], 16))
hextets = self._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self, ip_str=None):
"""Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not ip_str:
ip_str = str(self)
if isinstance(self, _BaseNet):
ip_str = str(self.ip)
if self._is_shorthand_ip(ip_str):
new_ip = []
hextet = ip_str.split('::')
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for _ in xrange(8 - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
# We've already got a longhand ip_str.
return ip_str
def _is_valid_ip(self, ip_str):
"""Ensure we have a valid IPv6 address.
Probably not as exhaustive as it should be.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = self._explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
IPv4Network(hextet)
except AddressValueError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _is_shorthand_ip(self, ip_str=None):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
return False
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def packed(self):
"""The binary representation of this address."""
return struct.pack('!QQ', self._ip >> 64, self._ip & (2**64 - 1))
@property
def version(self):
return self._version
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in IPv6Network('ff00::/8')
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self in IPv6Network('::/8') or
self in IPv6Network('100::/8') or
self in IPv6Network('200::/7') or
self in IPv6Network('400::/6') or
self in IPv6Network('800::/5') or
self in IPv6Network('1000::/4') or
self in IPv6Network('4000::/3') or
self in IPv6Network('6000::/3') or
self in IPv6Network('8000::/3') or
self in IPv6Network('A000::/3') or
self in IPv6Network('C000::/3') or
self in IPv6Network('E000::/4') or
self in IPv6Network('F000::/5') or
self in IPv6Network('F800::/6') or
self in IPv6Network('FE00::/9'))
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self == IPv6Network('::')
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self == IPv6Network('::1')
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in IPv6Network('fe80::/10')
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in IPv6Network('fec0::/10')
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 4193.
"""
return self in IPv6Network('fc00::/7')
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
hextets = self._explode_shorthand_ip_string().split(':')
if hextets[-3] != 'ffff':
return None
try:
return IPv4Address(int('%s%s' % (hextets[-2], hextets[-1]), 16))
except AddressValueError:
return None
class IPv6Address(_BaseV6, _BaseIP):
"""Represent and manipulate single IPv6 Addresses.
"""
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:4860::') ==
IPv6Address(42541956101370907050197289607612071936L).
or, more generally
IPv6Address(IPv6Address('2001:4860::')._ip) ==
IPv6Address('2001:4860::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
_BaseIP.__init__(self, address)
_BaseV6.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, (int, long)):
self._ip = address
if address < 0 or address > self._ALL_ONES:
raise AddressValueError(address)
return
# Constructing from a packed address
if _compat_has_real_bytes:
if isinstance(address, bytes) and len(address) == 16:
tmp = struct.unpack('!QQ', address)
self._ip = (tmp[0] << 64) | tmp[1]
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = str(address)
if not addr_str:
raise AddressValueError('')
self._ip = self._ip_int_from_string(addr_str)
class IPv6Network(_BaseV6, _BaseNet):
"""This class represents and manipulates 128-bit IPv6 networks.
Attributes: [examples for IPv6('2001:658:22A:CAFE:200::1/64')]
.ip: IPv6Address('2001:658:22a:cafe:200::1')
.network: IPv6Address('2001:658:22a:cafe::')
.hostmask: IPv6Address('::ffff:ffff:ffff:ffff')
.broadcast: IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')
.netmask: IPv6Address('ffff:ffff:ffff:ffff::')
.prefixlen: 64
"""
def __init__(self, address, strict=False):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the IP
and prefix/netmask.
'2001:4860::/128'
'2001:4860:0000:0000:0000:0000:0000:0000/128'
'2001:4860::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:4860::') ==
IPv6Network(42541956101370907050197289607612071936L).
or, more generally
IPv6Network(IPv6Network('2001:4860::')._ip) ==
IPv6Network('2001:4860::')
strict: A boolean. If true, ensure that we have been passed
A true network address, eg, 192.168.1.0/24 and not an
IP address on a network, eg, 192.168.1.1/24.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNet.__init__(self, address)
_BaseV6.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, (int, long)):
self._ip = address
self.ip = IPv6Address(self._ip)
self._prefixlen = 128
self.netmask = IPv6Address(self._ALL_ONES)
if address < 0 or address > self._ALL_ONES:
raise AddressValueError(address)
return
# Constructing from a packed address
if _compat_has_real_bytes:
if isinstance(address, bytes) and len(address) == 16:
tmp = struct.unpack('!QQ', address)
self._ip = (tmp[0] << 64) | tmp[1]
self.ip = IPv6Address(self._ip)
self._prefixlen = 128
self.netmask = IPv6Address(self._ALL_ONES)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = str(address).split('/')
if len(addr) > 2:
raise AddressValueError(address)
if not self._is_valid_ip(addr[0]):
raise AddressValueError(addr[0])
if len(addr) == 2:
if self._is_valid_netmask(addr[1]):
self._prefixlen = int(addr[1])
else:
raise NetmaskValueError(addr[1])
else:
self._prefixlen = 128
self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
self._ip = self._ip_int_from_string(addr[0])
self.ip = IPv6Address(self._ip)
if strict:
if self.ip != self.network:
raise ValueError('%s has host bits set' %
self.ip)
def _is_valid_netmask(self, prefixlen):
"""Verify that the netmask/prefixlen is valid.
Args:
prefixlen: A string, the netmask in prefix length format.
Returns:
A boolean, True if the prefix represents a valid IPv6
netmask.
"""
try:
prefixlen = int(prefixlen)
except ValueError:
return False
return 0 <= prefixlen <= 128
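# --- Illustrative usage sketch (added note; not part of the original file) ---
# Mirrors the attribute examples given in the IPv6Network docstring above.
#
#   net = IPv6Network('2001:658:22A:CAFE:200::1/64')
#   net.network                      # IPv6Address('2001:658:22a:cafe::')
#   net.prefixlen                    # 64
#   IPv6Address('::1').is_loopback   # True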
| nouiz/fredericbastien-ipaddr-py-speed-up | tags/2.1.0/ipaddr.py | Python | apache-2.0 | 58,769 |
# directory = 'Congressional_Bill_Corpus.v1.00/raw/'
directory = ''
text_file = directory + 'billtext_org.json'
labels_file = directory + 'train.json'
output_dir = '/Users/katya/datasets/congress_bills_2/'
import sys
# process noah's congress_bills_2 into a usable format
#to dict:
def skip_ahead_n_quotes(line, char_counter, maximum):
quote_counter = 0
while quote_counter < maximum:
if line[char_counter:char_counter+1] == '\"':
quote_counter += 1
char_counter += 1
# print 'to',line[char_counter:char_counter+10]
return char_counter
def parse_inside_char(line, char_counter, char):
string = ''
while line[char_counter] != char:
string += line[char_counter]
char_counter += 1
return string, char_counter
def rm_newlines(string):
# string.replace('\\\n', ' ')
string = string.replace('\\' + 'n', ' ')
for i in range(1,10):
string = string.replace(' ', ' ')
return string
def tokenize(line):
list_of_words = []
word = ''
for char in line:
if char == ' ':
list_of_words.append(word)
word = ''
else:
word += char
list_of_words.append(word.strip())
return tuple(list_of_words)
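# Example (added note): tokenize splits on single spaces and strips the final
# token, so tokenize("HR1234 1") == ('HR1234', '1') (the bill id here is a
# hypothetical placeholder); rm_newlines replaces literal "\n" escape
# sequences with spaces and then collapses runs of spaces.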
d = {}
for line in open(text_file):
if "\"\"" in line:
d[name] = ''
else:
# d = json.load(json_data)
# print d
char_counter = 0
# print "success"
name, char_counter = parse_inside_char(line, char_counter, '\t')
# print 'parse'
if '\"body\"' in line:
char_counter = skip_ahead_n_quotes(line, char_counter, 2)
# print 'skip ahead'
char_counter += 3
body, char_counter = parse_inside_char(line, char_counter, '\"')
# print 'parsed'
else:
body = ''
char_counter = skip_ahead_n_quotes(line, char_counter, 3)
char_counter += 3
# print 'skip 2'
title, char_counter = parse_inside_char(line, char_counter, '\"')
# print 'parsed2'
d[name] = rm_newlines(title) + ' ' + rm_newlines(body)
print 'quit'
with open(labels_file, 'r') as labels, open(output_dir + 'train.data', 'w') as data_out, open(output_dir + 'train.labels', 'w') as labels_out:
for line in labels:
line = line.replace('\t', ' ')
example_name, label = tokenize(line)
try:
data_out.write(d[example_name] + '\n')
except KeyError:
print example_name
else:
labels_out.write(label + '\n')
sys.stdout.flush()
print 'done'
| Noahs-ARK/ARKcat | src/scripts_and_misc/process_json.py | Python | apache-2.0 | 2,711 |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a Python package of the Linux guest environment."""
import glob
import sys
import setuptools
install_requires = ['setuptools']
if sys.version_info < (3, 0):
install_requires += ['boto']
if sys.version_info >= (3, 7):
install_requires += ['distro']
setuptools.setup(
author='Google Compute Engine Team',
author_email='[email protected]',
description='Google Compute Engine',
include_package_data=True,
install_requires=install_requires,
license='Apache Software License',
long_description='Google Compute Engine guest environment.',
name='google-compute-engine',
packages=setuptools.find_packages(),
url='https://github.com/GoogleCloudPlatform/compute-image-packages',
version='20191112.0',
# Entry points create scripts in /usr/bin that call a function.
entry_points={
'console_scripts': [
'google_accounts_daemon=google_compute_engine.accounts.accounts_daemon:main',
'google_clock_skew_daemon=google_compute_engine.clock_skew.clock_skew_daemon:main',
'google_instance_setup=google_compute_engine.instance_setup.instance_setup:main',
'google_network_daemon=google_compute_engine.networking.network_daemon:main',
'google_metadata_script_runner=google_compute_engine.metadata_scripts.script_manager:main',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
],
)
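# --- Illustrative note (added; not part of the original file) ---
# Standard setuptools workflows apply to this file, for example building a
# source distribution or installing into the current environment:
#
#   python setup.py sdist
#   pip install .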
| illfelder/compute-image-packages | packages/python-google-compute-engine/setup.py | Python | apache-2.0 | 3,041 |
#!/usr/bin/env python
# coding=utf-8
from webapp.web import Application
from handlers.index import IndexHandler
from handlers.register import RegisterHandler
from handlers.user import UserHandler
from handlers.signin import SigninHandler
from handlers.signout import SignoutHandler
from handlers.upload import UploadHandler
from handlers.avatar import AvatarHandler
from handlers.error import ErrorHandler
from handlers.password import PasswordHandler
from handlers.ftypeerror import FiletypeErrorHandler
URLS = (
("/", "IndexHandler"),
("/register?", "RegisterHandler"),
("/user", "UserHandler"),
("/signin", "SigninHandler"),
("/signout", "SignoutHandler"),
("/upload", "UploadHandler"),
("/avatar/(.*)", "AvatarHandler"),
("/error", "ErrorHandler"),
("/pwdchange", "PasswordHandler"),
("/ftypeerror", "FiletypeErrorHandler")
)
if __name__ == '__main__':
app = Application(globals(), URLS)
app.run()
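# --- Illustrative note (added; not part of the original file) ---
# URLS pairs URL patterns with handler class names; Application receives
# globals() in main, presumably to resolve those names. A new page would
# follow the same shape, e.g. a hypothetical ("/profile", "ProfileHandler")
# entry in URLS plus the matching handler import.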
| vincentpc/yagra | main.py | Python | apache-2.0 | 955 |