repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
fkie/rosrepo | src/rosrepo/ui.py | 1 | 12256 | # coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: Timo Röhling
#
# Copyright 2016 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import sys
from getpass import getpass
import re
try:
from itertools import izip_longest as zip_longest
except ImportError:
from itertools import zip_longest
from .util import isatty, get_terminal_size, UserError
from .terminal_color import fmt as color_fmt
RARROW = None
LARROW = None
FF_LARROW = None
FF_RARROW = None
HTHIN = None
HTHICK = None
THEAD = None
TFOOT = None
HSEP = None
XSEP = None
def enable_unicode_graphics():
global RARROW, LARROW, FF_LARROW, FF_RARROW, HTHIN, HTHICK, THEAD, TFOOT, HSEP, XSEP, _use_unicode_graphics
RARROW = u"━▶".encode(sys.stdout.encoding)
LARROW = u"◀━".encode(sys.stdout.encoding)
FF_LARROW = u"◀◀".encode(sys.stdout.encoding)
FF_RARROW = u"▶▶".encode(sys.stdout.encoding)
HTHIN = u"─".encode(sys.stdout.encoding)
HTHICK = u"━".encode(sys.stdout.encoding)
HSEP = u"│".encode(sys.stdout.encoding)
THEAD = u"━┯━".encode(sys.stdout.encoding)
TFOOT = u"━┷━".encode(sys.stdout.encoding)
XSEP = u"─┼─".encode(sys.stdout.encoding)
_use_unicode_graphics = True
def disable_unicode_graphics():
global RARROW, LARROW, FF_LARROW, FF_RARROW, HTHIN, HTHICK, THEAD, TFOOT, HSEP, XSEP, _use_unicode_graphics
RARROW = "->"
LARROW = "<-"
FF_LARROW = "<<"
FF_RARROW = ">>"
HTHIN = "-"
HTHICK = "-"
THEAD = "-+-"
TFOOT = "-+-"
HSEP = "|"
XSEP = "-+-"
_use_unicode_graphics = False
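# Probe whether stdout's encoding can represent the box-drawing characters;
# fall back to the plain ASCII replacements if it cannot (or if stdout reports
# no usable encoding at all).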
try:
u"─━▶◀┷┯┼".encode(sys.stdout.encoding)
enable_unicode_graphics()
except (UnicodeEncodeError, TypeError, AttributeError):
disable_unicode_graphics()
_ansi_escape = re.compile(r"\x1b[^m]*m")
def remove_ansi(text):
global _ansi_escape
return _ansi_escape.sub("", text)
def printed_len(text):
global _ansi_escape
i = 0
result = []
matches = _ansi_escape.finditer(text)
for m in matches:
result += list(range(i, m.start()))
i = m.end()
result += list(range(i, len(text)))
return result
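# For illustration: printed_len() returns the indices of the characters that
# remain visible once ANSI escape sequences are skipped; for "\x1b[31mab" it
# returns [5, 6].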
def slice_ansi_text(text, chunk_size, fill=" ", pad=True):
ll = printed_len(text)
initial_ansi = text[:ll[0]] if ll else ""
excess = len(ll) % chunk_size
if pad and excess > 0:
padded_text = text + fill[0] * (chunk_size - excess)
else:
padded_text = text
return list(initial_ansi + padded_text[ll[i]:ll[chunk_size + i] if chunk_size + i < len(ll) else len(padded_text)] for i in range(0, len(ll), chunk_size))
def pad_ansi_text(text, width, truncate=True, fill=" "):
L = printed_len(text)
length = len(L)
if width < length:
return text[:L[width]] if truncate else text
return text + fill[0] * (width - length)
def wrap_ansi_text(text, width, indent=0, indent_first=None, indent_next=None, suffix=""):
if width is None:
return text
if indent_first is None:
indent_first = indent
if indent_next is None:
indent_next = indent
result = []
chunks = text.split("\n")
sl = len(suffix)
skip_blank = False
for chunk in chunks:
count = indent_first
line = []
empty_paragraph = True
if indent_first > 0:
line.append(" " * (indent_first - 1))
for word in chunk.split(" "):
L = len(remove_ansi(word))
if L != 0 or not skip_blank:
if L != 0:
skip_blank = False
empty_paragraph = False
if count + L <= width - sl:
line.append(word)
count += L + 1
else:
result.append(" ".join(line))
line = []
count = indent_next
if indent_next > 0:
line.append(" " * (indent_next - 1))
if L == 0:
skip_blank = True
else:
line.append(word)
count += L + 1
result.append("" if skip_blank or empty_paragraph else " ".join(line))
return (suffix + "\n").join(result)
def reformat_paragraphs(text):
paragraphs = re.split(r"[\r\n]+\s*[\r\n]+", text.strip())
result = []
for p in paragraphs:
lines = [l.strip() for l in p.split("\n")]
result.append(" ".join(lines))
return "\n\n".join(result)
def escape(msg):
return msg.replace("@", "@@")
def msg(text, max_width=None, use_color=None, wrap=True, fd=None, **wrap_args):
from .terminal_color import ansi
if fd is None:
fd = sys.stderr
if use_color is None:
use_color = isatty(fd)
ansi_text = color_fmt(text, use_color=use_color)
if wrap:
try:
if max_width is None:
max_width, _ = get_terminal_size()
except OSError:
pass
fd.write(wrap_ansi_text(ansi_text, max_width, **wrap_args) + (ansi('reset') if use_color else ""))
def fatal(text):
raise UserError(text)
def error(text, use_color=None):
prog = "rosrepo"
msg("@!@{rf}%s: error: %s" % (prog, text), indent_next=len(prog) + 9)
def warning(text, use_color=None):
prog = "rosrepo"
msg("@!@{yf}%s: warning: %s" % (prog, text), use_color=use_color, indent_next=len(prog) + 11)
def readline(prompt, fd=None):
if fd is None:
fd = sys.stderr
fd.write(prompt)
fd.flush()
return sys.stdin.readline().rstrip("\r\n")
def ask_username_and_password(domain):
if not isatty(sys.stdin):
fatal("Need TTY to query credentials\n")
if isatty(sys.stderr):
fd = sys.stderr
elif isatty(sys.stdout):
fd = sys.stdout
else:
fatal("Need TTY to query credentials\n")
msg("\n@!Authentication required for @{cf}%s\n" % domain, fd=fd)
while True:
login = readline("Username: ", fd=fd)
if login == "":
continue
passwd = getpass("Password: ")
if passwd == "":
msg("Starting over\n\n", fd=fd)
continue
return login, passwd
def ask_personal_access_token(domain):
if not isatty(sys.stdin):
fatal("Need TTY to query credentials\n")
if isatty(sys.stderr):
fd = sys.stderr
elif isatty(sys.stdout):
fd = sys.stdout
else:
fatal("Need TTY to query credentials\n")
msg("\n@!Authentication required for @{cf}%s\n" % domain, fd=fd)
return readline("Personal Access Token: ", fd=fd)
def pick_dependency_resolution(package_name, pkg_list):
if not isatty(sys.stdin):
return None
if isatty(sys.stderr):
fd = sys.stderr
elif isatty(sys.stdout):
fd = sys.stdout
else:
return None
result = None
while result is None:
msg("\n@!Dependency resolution for @{cf}%s@|\n" % package_name, fd=fd)
msg(
"The package is not in your workspace and can be cloned from "
"multiple Git repositories. Please pick the one you want:\n\n", fd=fd
)
for i in range(len(pkg_list)):
msg("%3d. %s\n" % (i + 1, pkg_list[i].project.website), fd=fd)
msg("%3d. %s\n\n" % (0, "Choose automatically"), fd=fd)
try:
s = int(readline("--> ", fd=fd))
if s == 0:
return None
result = pkg_list[s - 1]
except (ValueError, IndexError):
msg("@!@{rf}Invalid choice@|\n\n", fd=fd)
return result
def show_conflicts(conflicts):
for name in sorted(conflicts.keys()):
error("cannot use package '%s'\n" % escape(name))
for reason in conflicts[name]:
msg(" - %s\n" % reason, indent_next=5)
def show_missing_system_depends(missing):
if missing:
from .resolver import get_system_package_manager
msg(
"You need to install additional resources on this computer to satisfy all dependencies. "
"Please run the following command:\n\n"
)
msg("@!" + get_system_package_manager().installer_cmd + " " + " ".join(sorted(list(missing))), indent_first=4, indent_next=len(get_system_package_manager().installer_cmd) + 5, suffix=" \\")
msg("\n\n")
def textify(s, fd=None):
if fd is None:
fd = sys.stdout
if hasattr(s, "encode"):
return s.encode(fd.encoding if hasattr(fd, "encoding") and fd.encoding else "UTF-8")
return str(s)
class TableView(object):
def __init__(self, *args, **kwargs):
self.columns = args
self.expand = kwargs.get("expand", False)
if not self.columns:
self.width = [1, 1]
else:
self.width = [max(1, len(remove_ansi(color_fmt(c)))) for c in self.columns]
self.rows = []
def add_row(self, *args):
row = [r if isinstance(r, list) or isinstance(r, tuple) else (r,) for r in args]
row = [r if isinstance(r, tuple) or len(r) > 0 else [""] for r in row] # Handle special case with empty lists
assert len(row) == len(self.width)
self.rows.append(row)
self.width = [max(w, *(len(remove_ansi(color_fmt(r))) for r in rs)) for w, rs in zip(self.width, row)]
def add_separator(self):
self.rows.append(None)
def empty(self):
return len(self.rows) == 0
def sort(self, column_index):
self.rows.sort(key=lambda x: x[column_index])
def write(self, fd=None, use_color=None):
width = self.width
actual_width = sum(width) + 3 * len(width) - 1
try:
total_width = get_terminal_size()[0]
except OSError:
total_width = None
if fd is None:
fd = sys.stdout
if use_color is None:
use_color = isatty(fd)
if total_width is not None:
if self.expand and actual_width < total_width:
width[-1] += total_width - actual_width
actual_width = total_width
while actual_width > total_width:
max_width = max(width)
if max_width == 1:
break
for i in range(len(width)):
if width[i] == max_width:
width[i] -= 1
actual_width -= 1
break
if self.columns:
fmt = color_fmt(" " + (" @{pf}" + HSEP + "@| ").join(["%s"] * len(width)) + "\n", use_color=use_color)
fd.write(color_fmt("@{pf}" + HTHICK + THEAD.join([HTHICK * w for w in width]) + HTHICK + "\n", use_color=use_color))
sep = color_fmt("@{pf}" + HTHIN + XSEP.join([HTHIN * w for w in width]) + HTHIN + "\n", use_color=use_color)
end = color_fmt("@{pf}" + HTHICK + TFOOT.join([HTHICK * w for w in width]) + HTHICK + "\n", use_color=use_color)
fd.write(fmt % tuple(pad_ansi_text(color_fmt("@!%s" % c, use_color=use_color), w) for c, w in zip(self.columns, width)))
else:
fmt = " %s %s\n"
sep = color_fmt("@{pf}" + (HTHIN * actual_width) + "@|\n", use_color=use_color)
end = sep
fd.write(sep)
for row in self.rows:
if row is None:
fd.write(sep)
continue
for line in zip_longest(*row, fillvalue=""):
chunks = (slice_ansi_text(color_fmt(r, use_color=use_color), w) for r, w in zip(line, width))
for chunk in zip_longest(*chunks):
fd.write(fmt % tuple(textify(r, fd=fd) if r is not None else " " * w for r, w in zip(chunk, width)))
fd.write(end)
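if __name__ == "__main__":
    # Illustrative demo of the helpers above; run it as "python -m rosrepo.ui"
    # (module path assumed) because of the relative imports at the top.
    warning("this is what a wrapped rosrepo warning looks like")
    demo_text = reformat_paragraphs(
        "First paragraph, first line.\nSecond line.\n\nSecond paragraph.")
    msg(escape(demo_text) + "\n", max_width=40, indent_next=2)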
| apache-2.0 |
ifduyue/sentry | tests/sentry/api/endpoints/test_project_processingissues.py | 2 | 4420 | from __future__ import absolute_import
from datetime import datetime
from django.utils import timezone
from django.core.urlresolvers import reverse
from sentry.models import (ProcessingIssue, EventError, RawEvent, EventProcessingIssue)
from sentry.testutils import APITestCase
class ProjectProjectProcessingIssuesTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name='foo')
raw_event = RawEvent.objects.create(project_id=project1.id, event_id='abc')
issue, _ = ProcessingIssue.objects.get_or_create(
project_id=project1.id, checksum='abc', type=EventError.NATIVE_MISSING_DSYM
)
EventProcessingIssue.objects.get_or_create(
raw_event=raw_event,
processing_issue=issue,
)
url = reverse(
'sentry-api-0-project-processing-issues',
kwargs={
'organization_slug': project1.organization.slug,
'project_slug': project1.slug,
}
)
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert response.data['hasIssues'] is True
assert response.data['hasMoreResolveableIssues'] is False
assert response.data['numIssues'] == 1
assert response.data['issuesProcessing'] == 0
assert response.data['resolveableIssues'] == 0
def test_issues(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name='foo')
raw_event = RawEvent.objects.create(project_id=project1.id, event_id='abc')
issue, _ = ProcessingIssue.objects.get_or_create(
project_id=project1.id,
checksum='abc',
type=EventError.NATIVE_MISSING_DSYM,
datetime=datetime(2013, 8, 13, 3, 8, 25, tzinfo=timezone.utc),
)
issue2, _ = ProcessingIssue.objects.get_or_create(
project_id=project1.id,
checksum='abcd',
type=EventError.NATIVE_MISSING_DSYM,
datetime=datetime(2014, 8, 13, 3, 8, 25, tzinfo=timezone.utc),
)
EventProcessingIssue.objects.get_or_create(
raw_event=raw_event,
processing_issue=issue,
)
url = reverse(
'sentry-api-0-project-processing-issues',
kwargs={
'organization_slug': project1.organization.slug,
'project_slug': project1.slug,
}
)
response = self.client.get(url + '?detailed=1', format='json')
assert response.status_code == 200, response.content
assert len(response.data['issues']) == 2
assert response.data['numIssues'] == 2
assert response.data['lastSeen'] == issue2.datetime
assert response.data['hasIssues'] is True
assert response.data['hasMoreResolveableIssues'] is False
assert response.data['issuesProcessing'] == 0
assert response.data['resolveableIssues'] == 0
assert response.data['issues'][0]['checksum'] == issue.checksum
assert response.data['issues'][0]['numEvents'] == 1
assert response.data['issues'][0]['type'] == EventError.NATIVE_MISSING_DSYM
assert response.data['issues'][1]['checksum'] == issue2.checksum
def test_resolvable_issues(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name='foo')
RawEvent.objects.create(project_id=project1.id, event_id='abc')
url = reverse(
'sentry-api-0-project-processing-issues',
kwargs={
'organization_slug': project1.organization.slug,
'project_slug': project1.slug,
}
)
response = self.client.get(url + '?detailed=1', format='json')
assert response.status_code == 200, response.content
assert response.data['numIssues'] == 0
assert response.data['resolveableIssues'] == 1
assert response.data['lastSeen'] is None
assert response.data['hasIssues'] is False
assert response.data['hasMoreResolveableIssues'] is False
assert response.data['numIssues'] == 0
assert response.data['issuesProcessing'] == 0
| bsd-3-clause |
meghana1995/sympy | sympy/physics/quantum/__init__.py | 122 | 1501 | __all__ = []
# The following pattern is used below for importing sub-modules:
#
# 1. "from foo import *". This imports all the names from foo.__all__ into
# this module. But, this does not put those names into the __all__ of
# this module. This enables "from sympy.physics.quantum import State" to
# work.
# 2. "import foo; __all__.extend(foo.__all__)". This adds all the names in
# foo.__all__ to the __all__ of this module. The names in __all__
# determine which names are imported when
# "from sympy.physics.quantum import *" is done.
from . import anticommutator
from .anticommutator import *
__all__.extend(anticommutator.__all__)
from .qapply import __all__ as qap_all
from .qapply import *
__all__.extend(qap_all)
from . import commutator
from .commutator import *
__all__.extend(commutator.__all__)
from . import dagger
from .dagger import *
__all__.extend(dagger.__all__)
from . import hilbert
from .hilbert import *
__all__.extend(hilbert.__all__)
from . import innerproduct
from .innerproduct import *
__all__.extend(innerproduct.__all__)
from . import operator
from .operator import *
__all__.extend(operator.__all__)
from .represent import __all__ as rep_all
from .represent import *
__all__.extend(rep_all)
from . import state
from .state import *
__all__.extend(state.__all__)
from . import tensorproduct
from .tensorproduct import *
__all__.extend(tensorproduct.__all__)
from . import constants
from .constants import *
__all__.extend(constants.__all__)
| bsd-3-clause |
kajgan/e2 | lib/python/Components/Sensors.py | 104 | 1928 | from Components.FanControl import fancontrol
class Sensors:
# (type, name, unit, directory)
TYPE_TEMPERATURE = 0
# (type, name, unit, fanid)
TYPE_FAN_RPM = 1
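# Illustrative entries as built by addSensors() below (names/units made up):
#   (TYPE_TEMPERATURE, "Temp A", "C", "/proc/stb/sensors/temp0")
#   (TYPE_FAN_RPM, "Fan 1", "rpm", 0)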
def __init__(self):
# (type, name, unit, sensor_specific_dict/list)
self.sensors_list = []
self.addSensors()
def getSensorsCount(self, type = None):
if type is None:
return len(self.sensors_list)
count = 0
for sensor in self.sensors_list:
if sensor[0] == type:
count += 1
return count
# returns a list of sensorids of type "type"
def getSensorsList(self, type = None):
if type is None:
return range(len(self.sensors_list))
list = []
for sensorid in range(len(self.sensors_list)):
if self.sensors_list[sensorid][0] == type:
list.append(sensorid)
return list
def getSensorType(self, sensorid):
return self.sensors_list[sensorid][0]
def getSensorName(self, sensorid):
return self.sensors_list[sensorid][1]
def getSensorValue(self, sensorid):
value = -1
sensor = self.sensors_list[sensorid]
if sensor[0] == self.TYPE_TEMPERATURE:
value = int(open("%s/value" % sensor[3], "r").readline().strip())
elif sensor[0] == self.TYPE_FAN_RPM:
value = fancontrol.getFanSpeed(sensor[3])
return value
def getSensorUnit(self, sensorid):
return self.sensors_list[sensorid][2]
def addSensors(self):
import os
if os.path.exists("/proc/stb/sensors"):
for dirname in os.listdir("/proc/stb/sensors"):
if dirname.find("temp", 0, 4) == 0:
name = open("/proc/stb/sensors/%s/name" % dirname, "r").readline().strip()
unit = open("/proc/stb/sensors/%s/unit" % dirname, "r").readline().strip()
self.sensors_list.append((self.TYPE_TEMPERATURE, name, unit, "/proc/stb/sensors/%s" % dirname))
for fanid in range(fancontrol.getFanCount()):
if fancontrol.hasRPMSensor(fanid):
self.sensors_list.append((self.TYPE_FAN_RPM, _("Fan %d") % (fanid + 1), "rpm", fanid))
sensors = Sensors()
| gpl-2.0 |
zaina/nova | nova/tests/functional/test_legacy_v3_compatible_wrapper.py | 7 | 1429 | # Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import wsgi
from nova.tests.functional import integrated_helpers
from nova.tests.functional.v3 import api_paste_fixture
class LegacyV2CompatibleTestBase(integrated_helpers._IntegratedTestBase):
_api_version = 'v2'
def setUp(self):
self.useFixture(api_paste_fixture.ApiPasteV2CompatibleFixture())
super(LegacyV2CompatibleTestBase, self).setUp()
def test_request_with_microversion_headers(self):
response = self.api.api_post('os-keypairs',
{"keypair": {"name": "test"}},
headers={wsgi.API_VERSION_REQUEST_HEADER: '2.100'})
self.assertNotIn(wsgi.API_VERSION_REQUEST_HEADER, response.headers)
self.assertNotIn('Vary', response.headers)
self.assertNotIn('type', response.body["keypair"])
| apache-2.0 |
fmartingr/iosfu | iosfu/gui/components/base.py | 1 | 3711 | from os.path import join as join_path, dirname
from inspect import getfile
from iosfu.utils import slugify
class Component(object):
"""
Base GUI Component object
"""
# Component type, do NOT modify
_type = None
def __unicode__(self):
return self.name
# class Category(Component):
# _type = 'category'
# name = None
# def __init__(self, name):
# self.name = name
class Panel(Component):
"""
Main panel component.
Adds a section on the top navigation bar, into the selected category.
If the category doesn't have any other panels, this will be shown as the main
button instead of the section.
"""
_type = 'panel'
# Identifier of the panel. Created automatically as a slug of the name if
# not set. GUI Panel identifiers MUST NOT BE REPEATED.
id = None
# Name of the panel (will be slugified to create an ID)
name = None
# List of sections
sections = []
# Section mapping by id
_section_map = {}
def __init__(self):
if not self.id and self.name:
self.id = slugify(self.name)
self._map_sections()
def get_section(self, section_id, backup):
if section_id in self._section_map:
return self.sections[self._section_map[section_id]](
backup=backup)
def render(self, *args, **kwargs):
"""
Main render method
"""
return 'Base GUIPanel'
#
# Privates
#
def _map_sections(self):
"""
Map section list position to its id in a dict()
"""
i = 0
for section in self.sections:
ins = section()
self._section_map[ins.__slug__] = i
i += 1
@property
def __slug__(self):
return self.id
class Section(Component):
"""
Section main component.
Gets a backup instance and a context, and returns the updated context with
the plugin analysis results, together with the template used to render them.
"""
_type = 'section'
# In case you need a custom ID for this panel.
id = None
# Name of the section
name = None
# Backup instance to work with
backup = None
# Plugin to use automagically
plugin = None
# Template context
context = dict()
def __init__(self, backup=None):
if backup:
self.backup = backup
if not self.id and self.name:
self.id = "{}".format(slugify(self.name))
def get_template(self, template_name):
tmpl = join_path(
dirname(getfile(self.__class__)), 'templates', template_name)
try:
with open(tmpl) as handler:
template_content = handler.read()
except IOError:
try:
# raise 'Template {} do not exist.'.format(template_name)
tmpl = join_path(dirname(getfile(
self.__class__.__bases__[0])), 'templates', template_name)
with open(tmpl) as handler:
template_content = handler.read()
except IOError:
raise 'Could not read template {}'.format(template_name)
return template_content
def get_context(self):
if self.backup and self.plugin:
plugin = self.plugin(backup=self.backup)
self.context = plugin.do()
def render(self, *args, **kwargs):
"""
Base rendering method
"""
ctx = kwargs.pop('ctx', dict())
self.get_context()
ctx.update(dict(plugin_data=self.context))
return self.get_template(self.template), ctx
@property
def __slug__(self):
return self.id
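# Illustrative sketch of how the base classes fit together; the names and the
# template file are hypothetical:
class ExampleSection(Section):
    name = 'Example section'     # Section.__init__ slugifies this into the id
    template = 'example.html'    # looked up under ./templates/ by get_template()
class ExamplePanel(Panel):
    name = 'Example panel'
    sections = [ExampleSection]  # mapped by Panel._map_sections()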
| mit |
simod/geonode | geonode/geoserver/context_processors.py | 5 | 2362 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from django.core.urlresolvers import reverse
from geonode.geoserver.helpers import ogc_server_settings
def geoserver_urls(request):
"""Global values to pass to templates"""
defaults = dict(
GEOSERVER_LOCAL_URL=ogc_server_settings.LOCATION,
GEOSERVER_BASE_URL=ogc_server_settings.public_url,
UPLOADER_URL=reverse('data_upload') if getattr(
settings,
'UPLOADER',
dict()).get(
'BACKEND',
'geonode.rest') == 'geonode.importer' else reverse('layer_upload'),
LAYER_ANCILLARY_FILES_UPLOAD_URL=reverse('layer_upload'),
MAPFISH_PRINT_ENABLED=getattr(ogc_server_settings, 'MAPFISH_PRINT_ENABLED', False),
PRINT_NG_ENABLED=getattr(ogc_server_settings, 'PRINT_NG_ENABLED', False),
GEONODE_SECURITY_ENABLED=getattr(ogc_server_settings, 'GEONODE_SECURITY_ENABLED', False),
GEOGIG_ENABLED=getattr(ogc_server_settings, 'GEOGIG_ENABLED', False),
TIME_ENABLED=getattr(
settings,
'UPLOADER',
dict()).get(
'OPTIONS',
dict()).get(
'TIME_ENABLED',
False),
MOSAIC_ENABLED=getattr(
settings,
'UPLOADER',
dict()).get(
'OPTIONS',
dict()).get(
'MOSAIC_ENABLED',
False),
)
return defaults
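# Illustrative note: Django only calls this context processor if its dotted
# path is registered with the template engine, e.g. (settings layout assumed):
#
#   TEMPLATES[0]['OPTIONS']['context_processors'].append(
#       'geonode.geoserver.context_processors.geoserver_urls')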
| gpl-3.0 |
romulojales/estudos-golang | data science from scratch/chap1.py | 2 | 1266 | users = [
{"id": 0, "name": "Hero"},
{"id": 1, "name": "Dunn"},
{"id": 2, "name": "Sue"},
{"id": 3, "name": "Chi"},
{"id": 4, "name": "Thor"},
{"id": 5, "name": "Clive"},
{"id": 6, "name": "Hicks"},
{"id": 7, "name": "Devin"},
{"id": 8, "name": "Kate"},
{"id": 9, "name": "Klein"}
]
friendships = [
(0, 1),
(0, 2),
(1, 2),
(1, 3),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(5, 7),
(6, 8),
(7, 8),
(8, 9)
]
def number_of_friends(user):
"""how many friends does _user_ have?"""
return len(user["friends"])
for user in users:
user["friends"] = []
for i, j in friendships:
# this works because users[i] is the user whose id is i
users[i]["friends"].append(users[j]) # add j as a friend of i
users[j]["friends"].append(users[i]) # add i as a friend of j
number_of_friends_by_user = [number_of_friends(user) for user in users]
total_connections = sum(number_of_friends_by_user)
num_users = len(users)
avg_connections = total_connections / num_users
print(avg_connections)
num_friends_by_id = [(user["id"], number_of_friends(user))
for user in users]
print (sorted(num_friends_by_id,
key=(lambda x: x[1]),
reverse=True))
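# For reference, with the hard-coded data above: 12 friendship pairs give 24
# connections across 10 users, so the printed average is 2.4, and the sorted
# list starts with ids 1, 2, 3, 5 and 8 (3 friends each).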
| mit |
napkindrawing/ansible | test/units/modules/network/nxos/test_nxos_system.py | 15 | 6187 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_system
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosSystemModule(TestNxosModule):
module = nxos_system
def setUp(self):
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_system.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_system.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None):
self.get_config.return_value = load_fixture('nxos_system_config.cfg')
self.load_config.return_value = None
def test_nxos_system_hostname_changed(self):
set_module_args(dict(hostname='foo'))
commands = ['hostname foo']
self.execute_module(changed=True, commands=commands)
def test_nxos_system_domain_lookup(self):
set_module_args(dict(domain_lookup=True))
commands = ['ip domain-lookup']
self.execute_module(changed=True, commands=commands)
def test_nxos_system_missing_vrf(self):
domain_name = dict(name='example.com', vrf='example')
set_module_args(dict(domain_name=domain_name))
self.execute_module(failed=True)
def test_nxos_system_domain_name(self):
set_module_args(dict(domain_name=['example.net']))
commands = ['no ip domain-name ansible.com',
'vrf context management', 'no ip domain-name eng.ansible.com', 'exit',
'ip domain-name example.net']
self.execute_module(changed=True, commands=commands)
def test_nxos_system_domain_name_complex(self):
domain_name = dict(name='example.net', vrf='management')
set_module_args(dict(domain_name=[domain_name]))
commands = ['no ip domain-name ansible.com',
'vrf context management', 'no ip domain-name eng.ansible.com', 'exit',
'vrf context management', 'ip domain-name example.net', 'exit']
self.execute_module(changed=True, commands=commands)
def test_nxos_system_domain_search(self):
set_module_args(dict(domain_search=['example.net']))
commands = ['vrf context management', 'no ip domain-list ansible.com', 'exit',
'vrf context management', 'no ip domain-list redhat.com', 'exit',
'no ip domain-list ansible.com', 'no ip domain-list redhat.com',
'ip domain-list example.net']
self.execute_module(changed=True, commands=commands)
def test_nxos_system_domain_search_complex(self):
domain_search = dict(name='example.net', vrf='management')
set_module_args(dict(domain_search=[domain_search]))
commands = ['vrf context management', 'no ip domain-list ansible.com', 'exit',
'vrf context management', 'no ip domain-list redhat.com', 'exit',
'no ip domain-list ansible.com', 'no ip domain-list redhat.com',
'vrf context management', 'ip domain-list example.net', 'exit']
self.execute_module(changed=True, commands=commands)
def test_nxos_system_name_servers(self):
set_module_args(dict(name_servers=['1.2.3.4', '8.8.8.8']))
commands = ['no ip name-server 172.26.1.1',
'vrf context management', 'no ip name-server 8.8.8.8', 'exit',
'vrf context management', 'no ip name-server 172.26.1.1', 'exit',
'ip name-server 1.2.3.4']
self.execute_module(changed=True, commands=commands)
def test_nxos_system_name_servers_complex(self):
name_servers = dict(server='1.2.3.4', vrf='management')
set_module_args(dict(name_servers=[name_servers]))
commands = ['no ip name-server 8.8.8.8', 'no ip name-server 172.26.1.1',
'vrf context management', 'no ip name-server 8.8.8.8', 'exit',
'vrf context management', 'no ip name-server 172.26.1.1', 'exit',
'vrf context management', 'ip name-server 1.2.3.4', 'exit']
self.execute_module(changed=True, commands=commands)
def test_nxos_system_system_mtu(self):
set_module_args(dict(system_mtu=2000))
commands = ['system jumbomtu 2000']
self.execute_module(changed=True, commands=commands)
def test_nxos_system_state_absent(self):
set_module_args(dict(state='absent'))
commands = ['no hostname', 'no ip domain-name ansible.com',
'vrf context management', 'no ip domain-name eng.ansible.com', 'exit',
'no ip domain-list ansible.com', 'no ip domain-list redhat.com',
'vrf context management', 'no ip domain-list ansible.com', 'exit',
'vrf context management', 'no ip domain-list redhat.com', 'exit',
'no ip name-server 8.8.8.8', 'no ip name-server 172.26.1.1',
'vrf context management', 'no ip name-server 8.8.8.8', 'exit',
'vrf context management', 'no ip name-server 172.26.1.1', 'exit',
'no system jumbomtu']
self.execute_module(changed=True, commands=commands)
| gpl-3.0 |
dcuolfg/dcuolfg | dcuolfg/characters/views.py | 2 | 2683 | # Copyright 2012-2013 Deryck Hodge. This software is licensed under the
# GNU Lesser General Public License version 3 (see the file LICENSE).
"""
Site-wide views.
"""
from django.http import (
Http404,
HttpResponseRedirect,
)
from django.shortcuts import (
get_object_or_404,
render,
)
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from dcuolfg.characters.forms import AddCharacterForm
from dcuolfg.characters.models import Character
def _get_server_value_from_name(server_name):
"""Return the server value to store based on server name."""
server_value = [
srv[0] for srv in Character.CHARACTER_SERVER_CHOICES
if srv[1] == server_name]
if len(server_value) == 1:
return server_value[0]
return None
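# For illustration (hypothetical choices): if CHARACTER_SERVER_CHOICES
# contained (1, 'USPC'), _get_server_value_from_name('USPC') would return 1,
# while an unknown server name yields None.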
def index(request):
"""Main list of recently updated characters."""
characters = Character.objects.updated()
data = {
'characters': characters,
'mission_count': len(characters),
}
return render(request, 'characters/index.html', data)
@login_required
def add_character(request):
"""A view for a player to add a character."""
form = None
if request.POST:
character = Character(player=request.user)
form = AddCharacterForm(request.POST, instance=character)
if form.is_valid():
character = form.save()
character_args = (character.get_server_display(), character.name)
return HttpResponseRedirect(
reverse('character_profile', args=character_args))
else:
form = AddCharacterForm()
data = {
'form': form,
}
return render(request, 'characters/add.html', data)
@login_required
def delete_character(request, server, name):
"""A view for a player to delete a character."""
server_value = _get_server_value_from_name(server)
if server_value is None:
raise Http404
character = get_object_or_404(Character, server=server_value, name=name)
if request.POST:
character.delete()
return HttpResponseRedirect(reverse('delete_character_success'))
data = {
'character': character,
}
return render(request, 'characters/delete.html', data)
def character_profile(request, server, name):
"""Render a profile view for a character."""
server_value = _get_server_value_from_name(server)
if server_value is None:
raise Http404
character = get_object_or_404(Character, server=server_value, name=name)
data = {
'character': character,
}
return render(request, 'characters/profile.html', data)
| lgpl-3.0 |
wimberosa/samba | buildtools/wafadmin/Tools/cs.py | 19 | 1803 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"C# support"
import TaskGen, Utils, Task, Options
from Logs import error
from TaskGen import before, after, taskgen, feature
flag_vars= ['FLAGS', 'ASSEMBLIES']
@feature('cs')
def init_cs(self):
Utils.def_attrs(self,
flags = '',
assemblies = '',
resources = '',
uselib = '')
@feature('cs')
@after('init_cs')
def apply_uselib_cs(self):
if not self.uselib:
return
global flag_vars
for var in self.to_list(self.uselib):
for v in self.flag_vars:
val = self.env[v+'_'+var]
if val: self.env.append_value(v, val)
@feature('cs')
@after('apply_uselib_cs')
@before('apply_core')
def apply_cs(self):
try: self.meths.remove('apply_core')
except ValueError: pass
# process the flags for the assemblies
for i in self.to_list(self.assemblies) + self.env['ASSEMBLIES']:
self.env.append_unique('_ASSEMBLIES', '/r:'+i)
# process the flags for the resources
for i in self.to_list(self.resources):
self.env.append_unique('_RESOURCES', '/resource:'+i)
# what kind of assembly are we generating?
self.env['_TYPE'] = getattr(self, 'type', 'exe')
# additional flags
self.env.append_unique('_FLAGS', self.to_list(self.flags))
self.env.append_unique('_FLAGS', self.env.FLAGS)
# process the sources
nodes = [self.path.find_resource(i) for i in self.to_list(self.source)]
self.create_task('mcs', nodes, self.path.find_or_declare(self.target))
Task.simple_task_type('mcs', '${MCS} ${SRC} /target:${_TYPE} /out:${TGT} ${_FLAGS} ${_ASSEMBLIES} ${_RESOURCES}', color='YELLOW')
def detect(conf):
csc = getattr(Options.options, 'cscbinary', None)
if csc:
conf.env.MCS = csc
conf.find_program(['gmcs', 'mcs'], var='MCS')
def set_options(opt):
opt.add_option('--with-csc-binary', type='string', dest='cscbinary')
| gpl-3.0 |
miguelfervi/SSBW-Restaurantes | restaurantes/lib/python2.7/site-packages/django/conf/locale/zh_Hans/formats.py | 1008 | 1810 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
TIME_FORMAT = 'H:i' # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月' # 2016年9月
MONTH_DAY_FORMAT = 'm月j日' # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1 # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y/%m/%d', # '2016/09/05'
'%Y-%m-%d', # '2016-09-05'
'%Y年%n月%j日', # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
'%H:%M', # '20:45'
'%H:%M:%S', # '20:45:29'
'%H:%M:%S.%f', # '20:45:29.000200'
]
DATETIME_INPUT_FORMATS = [
'%Y/%m/%d %H:%M', # '2016/09/05 20:45'
'%Y-%m-%d %H:%M', # '2016-09-05 20:45'
'%Y年%n月%j日 %H:%M', # '2016年9月5日 14:45'
'%Y/%m/%d %H:%M:%S', # '2016/09/05 20:45:29'
'%Y-%m-%d %H:%M:%S', # '2016-09-05 20:45:29'
'%Y年%n月%j日 %H:%M:%S', # '2016年9月5日 20:45:29'
'%Y/%m/%d %H:%M:%S.%f', # '2016/09/05 20:45:29.000200'
'%Y-%m-%d %H:%M:%S.%f', # '2016-09-05 20:45:29.000200'
'%Y年%n月%j日 %H:%M:%S.%f', # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
NUMBER_GROUPING = 4
| gpl-3.0 |
rezoo/chainer | chainer/functions/math/bias.py | 5 | 1529 | import chainer
from chainer.functions.array import broadcast
from chainer.functions.array import reshape
def bias(x, y, axis=1):
"""Elementwise summation with broadcasting.
Computes an elementwise summation of two input variables, with the shape of
the latter variable broadcasted to match the shape of the former. ``axis``
is the first axis of the first variable along which the second variable is
applied.
The term "broadcasting" here comes from Caffe's bias layer so the
"broadcasting" with the following arguments::
x : 100 x 3 x 40 x 5 x 6
y : 3 x 40
axis : 1
is equivalent to the following numpy broadcasting::
x : 100 x 3 x 40 x 5 x 6
y : (1 x) 3 x 40 x 1 x 1
Note that the axis of ``x`` to which we apply ``y`` is specified by the
argument ``axis``, whose meaning is different from numpy's ``axis``.
Args:
x (~chainer.Variable): Input variable to be summed.
y (~chainer.Variable): Input variable to sum, broadcasted.
axis (int): The first axis of ``x`` along which ``y`` is applied.
Returns:
~chainer.Variable: Output variable.
"""
x_shape = x.shape
y_shape = y.shape
if chainer.is_debug():
assert x_shape[axis:axis + len(y_shape)] == y_shape
y1_shape = tuple([1] * axis + list(y_shape) +
[1] * (len(x_shape) - axis - len(y_shape)))
y1 = reshape.reshape(y, y1_shape)
y2 = broadcast.broadcast_to(y1, x_shape)
return x + y2
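if __name__ == "__main__":
    # Minimal usage sketch; assumes numpy is available alongside chainer.
    import numpy as np
    x = chainer.Variable(np.ones((100, 3, 40, 5, 6), dtype=np.float32))
    y = chainer.Variable(np.full((3, 40), 2.0, dtype=np.float32))
    z = bias(x, y, axis=1)  # y is reshaped to (1, 3, 40, 1, 1) and broadcast
    print(z.shape)          # -> (100, 3, 40, 5, 6)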
| mit |
cernops/python-keystoneclient-kerberos | keystoneclient_kerberos/v3.py | 1 | 1161 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.auth.identity import v3
import requests_kerberos
class KerberosMethod(v3.AuthMethod):
_method_parameters = []
def get_auth_data(self, session, auth, headers, request_kwargs, **kwargs):
# NOTE(jamielennox): request_kwargs is passed as a kwarg however it is
# required and always present when called from keystoneclient.
request_kwargs['requests_auth'] = requests_kerberos.HTTPKerberosAuth(
mutual_authentication=requests_kerberos.DISABLED)
return 'external', {}
class Kerberos(v3.AuthConstructor):
_auth_method_class = KerberosMethod
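# Illustrative usage sketch; the endpoint URL is hypothetical and a reachable
# Kerberos-enabled Keystone is assumed:
#
#   from keystoneclient import session
#   auth = Kerberos(auth_url='https://keystone.example.com:5000/v3')
#   sess = session.Session(auth=auth)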
| apache-2.0 |
facilecoin/facilecoin-core | qa/rpc-tests/rest.py | 1 | 3267 | #!/usr/bin/env python2
# Copyright (c) 2014 The FacileCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework import FacileCoinTestFramework
from util import *
import json
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
def http_get_call(host, port, path, response_object = 0):
conn = httplib.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (FacileCoinTestFramework):
FORMAT_SEPARATOR = "."
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
bb_hash = self.nodes[0].getbestblockhash()
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check json format
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['hash'], bb_hash)
# do tx test
tx_hash = json_obj['tx'][0]['txid'];
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# now mine the transactions
newblockhash = self.nodes[1].setgenerate(True, 1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
if __name__ == '__main__':
RESTTest ().main ()
| mit |
alvaroaleman/ansible | lib/ansible/modules/cloud/centurylink/clc_loadbalancer.py | 23 | 35423 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
module: clc_loadbalancer
short_description: Create, Delete shared loadbalancers in CenturyLink Cloud.
description:
- An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud.
version_added: "2.0"
options:
name:
description:
- The name of the loadbalancer
required: True
description:
description:
- A description for the loadbalancer
required: False
default: None
alias:
description:
- The alias of your CLC Account
required: True
location:
description:
- The location of the datacenter where the load balancer resides in
required: True
method:
description:
- The balancing method for the load balancer pool
required: False
default: None
choices: ['leastConnection', 'roundRobin']
persistence:
description:
- The persistence method for the load balancer
required: False
default: None
choices: ['standard', 'sticky']
port:
description:
- Port to configure on the public-facing side of the load balancer pool
required: False
default: None
choices: [80, 443]
nodes:
description:
- A list of nodes that need to be added to the load balancer pool
required: False
default: []
status:
description:
- The status of the loadbalancer
required: False
default: enabled
choices: ['enabled', 'disabled']
state:
description:
- Whether to create or delete the load balancer pool
required: False
default: present
choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Create Loadbalancer
hosts: localhost
connection: local
tasks:
- name: Actually Create things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: present
- name: Add node to an existing loadbalancer pool
hosts: localhost
connection: local
tasks:
- name: Actually Create things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.234
privatePort: 80
state: nodes_present
- name: Remove node from an existing loadbalancer pool
hosts: localhost
connection: local
tasks:
- name: Actually Create things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.234
privatePort: 80
state: nodes_absent
- name: Delete LoadbalancerPool
hosts: localhost
connection: local
tasks:
- name: Actually Delete things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: port_absent
- name: Delete Loadbalancer
hosts: localhost
connection: local
tasks:
- name: Actually Delete things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- ipAddress: 10.11.22.123
privatePort: 80
state: absent
'''
RETURN = '''
loadbalancer:
description: The load balancer result object from CLC
returned: success
type: dict
sample:
{
"description":"test-lb",
"id":"ab5b18cb81e94ab9925b61d1ca043fb5",
"ipAddress":"66.150.174.197",
"links":[
{
"href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
"rel":"self",
"verbs":[
"GET",
"PUT",
"DELETE"
]
},
{
"href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
"rel":"pools",
"verbs":[
"GET",
"POST"
]
}
],
"name":"test-lb",
"pools":[
],
"status":"enabled"
}
'''
__version__ = '${version}'
from time import sleep
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
class ClcLoadBalancer:
clc = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.lb_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
changed = False
result_lb = None
loadbalancer_name = self.module.params.get('name')
loadbalancer_alias = self.module.params.get('alias')
loadbalancer_location = self.module.params.get('location')
loadbalancer_description = self.module.params.get('description')
loadbalancer_port = self.module.params.get('port')
loadbalancer_method = self.module.params.get('method')
loadbalancer_persistence = self.module.params.get('persistence')
loadbalancer_nodes = self.module.params.get('nodes')
loadbalancer_status = self.module.params.get('status')
state = self.module.params.get('state')
if loadbalancer_description is None:
loadbalancer_description = loadbalancer_name
self._set_clc_credentials_from_env()
self.lb_dict = self._get_loadbalancer_list(
alias=loadbalancer_alias,
location=loadbalancer_location)
if state == 'present':
changed, result_lb, lb_id = self.ensure_loadbalancer_present(
name=loadbalancer_name,
alias=loadbalancer_alias,
location=loadbalancer_location,
description=loadbalancer_description,
status=loadbalancer_status)
if loadbalancer_port:
changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
lb_id=lb_id,
alias=loadbalancer_alias,
location=loadbalancer_location,
method=loadbalancer_method,
persistence=loadbalancer_persistence,
port=loadbalancer_port)
if loadbalancer_nodes:
changed, result_nodes = self.ensure_lbpool_nodes_set(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
elif state == 'absent':
changed, result_lb = self.ensure_loadbalancer_absent(
name=loadbalancer_name,
alias=loadbalancer_alias,
location=loadbalancer_location)
elif state == 'port_absent':
changed, result_lb = self.ensure_loadbalancerpool_absent(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port)
elif state == 'nodes_present':
changed, result_lb = self.ensure_lbpool_nodes_present(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
elif state == 'nodes_absent':
changed, result_lb = self.ensure_lbpool_nodes_absent(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
self.module.exit_json(changed=changed, loadbalancer=result_lb)
def ensure_loadbalancer_present(
self, name, alias, location, description, status):
"""
Checks to see if a load balancer exists and creates one if it does not.
:param name: Name of loadbalancer
:param alias: Alias of account
:param location: Datacenter
:param description: Description of loadbalancer
:param status: Enabled / Disabled
:return: (changed, result, lb_id)
changed: Boolean whether a change was made
result: The result object from the CLC load balancer request
lb_id: The load balancer id
"""
changed = False
result = name
lb_id = self._loadbalancer_exists(name=name)
if not lb_id:
if not self.module.check_mode:
result = self.create_loadbalancer(name=name,
alias=alias,
location=location,
description=description,
status=status)
lb_id = result.get('id')
changed = True
return changed, result, lb_id
def ensure_loadbalancerpool_present(
self, lb_id, alias, location, method, persistence, port):
"""
Checks to see if a load balancer pool exists and creates one if it does not.
:param lb_id: The loadbalancer id
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param method: the load balancing method
:param persistence: the load balancing persistence type
:param port: the port that the load balancer will listen on
:return: (changed, group, pool_id) -
changed: Boolean whether a change was made
result: The result from the CLC API call
pool_id: The string id of the load balancer pool
"""
changed = False
result = port
if not lb_id:
return changed, None, None
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if not pool_id:
if not self.module.check_mode:
result = self.create_loadbalancerpool(
alias=alias,
location=location,
lb_id=lb_id,
method=method,
persistence=persistence,
port=port)
pool_id = result.get('id')
changed = True
return changed, result, pool_id
def ensure_loadbalancer_absent(self, name, alias, location):
"""
Checks to see if a load balancer exists and deletes it if it does
:param name: Name of the load balancer
:param alias: Alias of account
:param location: Datacenter
:return: (changed, result)
changed: Boolean whether a change was made
result: The result from the CLC API Call
"""
changed = False
result = name
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
if not self.module.check_mode:
result = self.delete_loadbalancer(alias=alias,
location=location,
name=name)
changed = True
return changed, result
def ensure_loadbalancerpool_absent(self, alias, location, name, port):
"""
Checks to see if a load balancer pool exists and deletes it if it does
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer listens on
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = None
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed = True
if not self.module.check_mode:
result = self.delete_loadbalancerpool(
alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id)
else:
result = "Pool doesn't exist"
else:
result = "LB Doesn't Exist"
return changed, result
def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
"""
Checks to see if the provided list of nodes exists for the pool
and sets the nodes if any in the list are missing
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: The list of nodes to be updated to the pool
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
result = {}
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_check=nodes)
if not nodes_exist:
changed = True
result = self.set_loadbalancernodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
"""
Checks to see if the provided list of nodes exists for the pool and adds any missing nodes to the pool
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: the list of nodes to be added
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed, result = self.add_lbpool_nodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_add=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
"""
Checks to see if the provided list of nodes exists for the pool and removes any that are found
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: the list of nodes to be removed
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed, result = self.remove_lbpool_nodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_remove=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def create_loadbalancer(self, name, alias, location, description, status):
"""
Create a loadbalancer w/ params
:param name: Name of loadbalancer
:param alias: Alias of account
:param location: Datacenter
:param description: Description for loadbalancer to be created
:param status: Enabled / Disabled
:return: result: The result from the CLC API call
"""
result = None
try:
result = self.clc.v2.API.Call('POST',
'/v2/sharedLoadBalancers/%s/%s' % (alias,
location),
json.dumps({"name": name,
"description": description,
"status": status}))
sleep(1)
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to create load balancer "{0}". {1}'.format(
name, str(e.response_text)))
return result
def create_loadbalancerpool(
self, alias, location, lb_id, method, persistence, port):
"""
Creates a pool on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param method: the load balancing method
:param persistence: the load balancing persistence type
:param port: the port that the load balancer will listen on
:return: result: The result from the create API call
"""
result = None
try:
result = self.clc.v2.API.Call(
'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
(alias, location, lb_id), json.dumps(
{
"port": port, "method": method, "persistence": persistence
}))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to create pool for load balancer id "{0}". {1}'.format(
lb_id, str(e.response_text)))
return result
def delete_loadbalancer(self, alias, location, name):
"""
Delete CLC loadbalancer
:param alias: Alias for account
:param location: Datacenter
:param name: Name of the loadbalancer to delete
:return: result: The result from the CLC API call
"""
result = None
lb_id = self._get_loadbalancer_id(name=name)
try:
result = self.clc.v2.API.Call(
'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
(alias, location, lb_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to delete load balancer "{0}". {1}'.format(
name, str(e.response_text)))
return result
def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
"""
Delete the pool on the provided load balancer
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the load balancer pool
:return: result: The result from the delete API call
"""
result = None
try:
result = self.clc.v2.API.Call(
'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
(alias, location, lb_id, pool_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
lb_id, str(e.response_text)))
return result
def _get_loadbalancer_id(self, name):
"""
Retrieves unique ID of loadbalancer
:param name: Name of loadbalancer
:return: Unique ID of the loadbalancer
"""
        lb_id = None
        for lb in self.lb_dict:
            if lb.get('name') == name:
                lb_id = lb.get('id')
        return lb_id
def _get_loadbalancer_list(self, alias, location):
"""
Retrieve a list of loadbalancers
:param alias: Alias for account
:param location: Datacenter
:return: JSON data for all loadbalancers at datacenter
"""
result = None
try:
result = self.clc.v2.API.Call(
'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to fetch load balancers for account: {0}. {1}'.format(
alias, str(e.response_text)))
return result
def _loadbalancer_exists(self, name):
"""
Verify a loadbalancer exists
:param name: Name of loadbalancer
:return: False or the ID of the existing loadbalancer
"""
result = False
for lb in self.lb_dict:
if lb.get('name') == name:
result = lb.get('id')
return result
def _loadbalancerpool_exists(self, alias, location, port, lb_id):
"""
Checks to see if a pool exists on the specified port on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param port: the port to check and see if it exists
:param lb_id: the id string of the provided load balancer
:return: result: The id string of the pool or False
"""
result = False
try:
pool_list = self.clc.v2.API.Call(
'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
(alias, location, lb_id))
except APIFailedResponse as e:
return self.module.fail_json(
                msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
lb_id, str(e.response_text)))
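        # The CLC API may return the pool port as a string, so compare the ports numerically.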
for pool in pool_list:
if int(pool.get('port')) == int(port):
result = pool.get('id')
return result
def _loadbalancerpool_nodes_exists(
self, alias, location, lb_id, pool_id, nodes_to_check):
"""
Checks to see if a set of nodes exists on the specified port on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the provided load balancer
:param pool_id: the id string of the load balancer pool
:param nodes_to_check: the list of nodes to check for
:return: result: True / False indicating if the given nodes exist
"""
        if not nodes_to_check:
            return False
        nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
        for node in nodes_to_check:
            if not node.get('status'):
                node['status'] = 'enabled'
            # One missing node is enough to require an update of the pool
            if node not in nodes:
                return False
        return True
def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
"""
Updates nodes to the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes: a list of dictionaries containing the nodes to set
:return: result: The result from the CLC API call
"""
result = None
if not lb_id:
return result
if not self.module.check_mode:
try:
result = self.clc.v2.API.Call('PUT',
'/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
% (alias, location, lb_id, pool_id), json.dumps(nodes))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
pool_id, str(e.response_text)))
return result
def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
"""
Add nodes to the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes_to_add: a list of dictionaries containing the nodes to add
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = {}
nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
for node in nodes_to_add:
if not node.get('status'):
node['status'] = 'enabled'
            if node not in nodes:
changed = True
nodes.append(node)
        if changed and not self.module.check_mode:
result = self.set_loadbalancernodes(
alias,
location,
lb_id,
pool_id,
nodes)
return changed, result
def remove_lbpool_nodes(
self, alias, location, lb_id, pool_id, nodes_to_remove):
"""
Removes nodes from the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes_to_remove: a list of dictionaries containing the nodes to remove
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = {}
nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
for node in nodes_to_remove:
if not node.get('status'):
node['status'] = 'enabled'
if node in nodes:
changed = True
nodes.remove(node)
        if changed and not self.module.check_mode:
result = self.set_loadbalancernodes(
alias,
location,
lb_id,
pool_id,
nodes)
return changed, result
def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
"""
Return the list of nodes available to the provided load balancer pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:return: result: The list of nodes
"""
result = None
try:
result = self.clc.v2.API.Call('GET',
'/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
% (alias, location, lb_id, pool_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
pool_id, str(e.response_text)))
return result
@staticmethod
def define_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(required=True),
description=dict(default=None),
location=dict(required=True),
alias=dict(required=True),
port=dict(choices=[80, 443]),
method=dict(choices=['leastConnection', 'roundRobin']),
persistence=dict(choices=['standard', 'sticky']),
nodes=dict(type='list', default=[]),
status=dict(default='enabled', choices=['enabled', 'disabled']),
state=dict(
default='present',
choices=[
'present',
'absent',
'port_absent',
'nodes_present',
'nodes_absent'])
)
return argument_spec
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
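        # An explicit API token plus account alias takes precedence over username/password credentials.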
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
supports_check_mode=True)
clc_loadbalancer = ClcLoadBalancer(module)
clc_loadbalancer.process_request()
from ansible.module_utils.basic import * # pylint: disable=W0614
if __name__ == '__main__':
main()
| gpl-3.0 |
snak3ater/kernel_msm | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
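	# Writes <mod>_base.h with FC-flavoured nacl/tpg/lport structs (WWPN based)
	# and selects the "lport"/"nport" naming used by the rest of the generator.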
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
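	# Writes <mod>_base.h with SAS-flavoured nacl/tpg/tport structs (WWPN based)
	# and selects the "tport"/"iport" naming used by the rest of the generator.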
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
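	# Writes <mod>_base.h with iSCSI-flavoured nacl/tpg/tport structs (IQN based)
	# and selects the "tport"/"iport" naming used by the rest of the generator.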
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
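	# Writes <mod>_configfs.c: configfs make/drop callbacks for node ACLs, TPGs
	# and WWNs, the target_core_fabric_ops table, and module init/exit code.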
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
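	# Collects the function-pointer members of struct target_core_fabric_ops
	# from include/target/target_core_fabric.h into the global fabric_ops list.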
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
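	# Writes <mod>_fabric.c with stub implementations for every collected fabric
	# op, plus the matching prototypes in <mod>_fabric.h.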
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
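	# Writes the new fabric module's Makefile.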
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
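	# Writes the new fabric module's Kconfig entry.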
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
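# Example invocation (illustrative): run from Documentation/target/ inside a
# kernel tree, since the script resolves the tree root as cwd + "/../../":
#
#   ./tcm_mod_builder.py -m tcm_nab5000 -p iSCSI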
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| gpl-2.0 |
quru/wagtail | wagtail/wagtailsearch/migrations/0003_remove_editors_pick.py | 31 | 1292 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailsearch', '0002_add_verbose_names'),
]
operations = [
# EditorsPicks have been moved to the "wagtailsearchpromotions" module.
# Remove EditorsPick from wagtailsearch but don't drop the underlying table
# so wagtailsearchpromotions can pick it up in its initial migration.
# If wagtailsearchpromotions isn't installed, this table will remain
# in the database unmanaged until it is. This could potentially happen
# at any point in the future so it's important to keep this behaviour
# even if we decide to squash these migrations.
migrations.SeparateDatabaseAndState(
state_operations=[
migrations.RemoveField(
model_name='editorspick',
name='page',
),
migrations.RemoveField(
model_name='editorspick',
name='query',
),
migrations.DeleteModel(
name='EditorsPick',
),
],
database_operations=[],
)
]
| bsd-3-clause |
woodpecker1/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py | 124 | 5952 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import sys
from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class PrepareChangeLog(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.quiet,
Options.email,
Options.git_commit,
Options.update_changelogs,
]
def _ensure_bug_url(self, state):
if not state.get("bug_id"):
return
bug_id = state.get("bug_id")
changelogs = self.cached_lookup(state, "changelogs")
for changelog_path in changelogs:
changelog = ChangeLog(changelog_path, self._tool.filesystem)
if not changelog.latest_entry().bug_id():
changelog.set_short_description_and_bug_url(
self.cached_lookup(state, "bug_title"),
self._tool.bugs.bug_url_for_bug_id(bug_id))
def _resolve_existing_entry(self, changelog_path):
# When this is called, the top entry in the ChangeLog was just created
        # by prepare-ChangeLog, as a clean updated version of the one below it.
with self._tool.filesystem.open_text_file_for_reading(changelog_path) as changelog_file:
entries_gen = ChangeLog.parse_entries_from_file(changelog_file)
entries = zip(entries_gen, range(2))
if not len(entries):
raise Exception("Expected to find at least two ChangeLog entries in %s but found none." % changelog_path)
if len(entries) == 1:
# If we get here, it probably means we've just rolled over to a
# new CL file, so we don't have anything to resolve.
return
(new_entry, _), (old_entry, _) = entries
final_entry = self._merge_entries(old_entry, new_entry)
changelog = ChangeLog(changelog_path, self._tool.filesystem)
changelog.delete_entries(2)
changelog.prepend_text(final_entry)
def _merge_entries(self, old_entry, new_entry):
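        # Keep the old entry's text, but refresh its date, bug description and
        # touched-files section from the newly generated entry.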
final_entry = old_entry.contents()
final_entry = final_entry.replace(old_entry.date(), new_entry.date(), 1)
new_bug_desc = new_entry.bug_description()
old_bug_desc = old_entry.bug_description()
if new_bug_desc and old_bug_desc and new_bug_desc != old_bug_desc:
final_entry = final_entry.replace(old_bug_desc, new_bug_desc)
new_touched = new_entry.touched_functions()
old_touched = old_entry.touched_functions()
if new_touched != old_touched:
if old_entry.is_touched_files_text_clean():
final_entry = final_entry.replace(old_entry.touched_files_text(), new_entry.touched_files_text())
else:
final_entry += "\n" + new_entry.touched_files_text()
return final_entry + "\n"
def run(self, state):
if self.cached_lookup(state, "changelogs"):
self._ensure_bug_url(state)
if not self._options.update_changelogs:
return
args = self._tool.deprecated_port().prepare_changelog_command()
if state.get("bug_id"):
args.append("--bug=%s" % state["bug_id"])
args.append("--description=%s" % self.cached_lookup(state, 'bug_title'))
if self._options.email:
args.append("--email=%s" % self._options.email)
if self._tool.scm().supports_local_commits():
args.append("--merge-base=%s" % self._tool.scm().merge_base(self._options.git_commit))
args.extend(self._changed_files(state))
try:
output = self._tool.executive.run_and_throw_if_fail(args, self._options.quiet, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
_log.error("Unable to prepare ChangeLogs.")
sys.exit(1)
# These are the ChangeLog entries added by prepare-Changelog
changelogs = re.findall(r'Editing the (\S*/ChangeLog) file.', output)
changelogs = set(self._tool.filesystem.join(self._tool.scm().checkout_root, f) for f in changelogs)
for changelog in changelogs & set(self.cached_lookup(state, "changelogs")):
self._resolve_existing_entry(changelog)
self.did_modify_checkout(state)
| bsd-3-clause |
CodingCat/mxnet | tests/nightly/mxnet_keras_integration_tests/model_util.py | 43 | 2441 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from keras import backend as K
from keras.models import Model
from keras.layers import Input, merge
from keras.layers.core import Lambda
# Before running the integration tests, users are expected to set these
# environment variables.
IS_GPU = (os.environ['MXNET_KERAS_TEST_MACHINE'] == 'GPU')
GPU_NUM = int(os.environ['GPU_NUM']) if IS_GPU else 0
KERAS_BACKEND = os.environ['KERAS_BACKEND']
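# Illustrative only: a single-GPU MXNet run would be configured with
# something like the following before this module is imported (the values
# are examples, not requirements of the test suite):
#
#   os.environ['MXNET_KERAS_TEST_MACHINE'] = 'GPU'
#   os.environ['GPU_NUM'] = '1'
#   os.environ['KERAS_BACKEND'] = 'mxnet'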
def slice_batch(x, n_gpus, part):
"""Return the slice of batch `x` that belongs to GPU number `part` out of `n_gpus`."""
sh = K.shape(x)
L = sh[0] / n_gpus
if part == n_gpus - 1:
# The last slice absorbs any remainder when the batch size is not evenly divisible.
return x[part*L:]
return x[part*L:(part+1)*L]
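# Sketch of how slice_batch is typically wired up (hedged -- the exact calls
# depend on the Keras 1.x API these tests target): each GPU tower receives
# its share of the batch through a Lambda layer, e.g.
#
#   slice_i = Lambda(slice_batch,
#                    lambda shape: shape,
#                    arguments={'n_gpus': GPU_NUM, 'part': i})(x)
#
# and the per-tower outputs are then concatenated back into one batch with
# merge(outputs, mode='concat', concat_axis=0).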
def prepare_gpu_model(model, **kwargs):
gpu_list = []
for i in range(GPU_NUM):
gpu_list.append('gpu(%d)' % i)
if KERAS_BACKEND == 'mxnet':
# Only the MXNet backend understands the 'context' kwarg listing the GPUs to use.
kwargs['context'] = gpu_list
model.compile(**kwargs)
else:
model.compile(**kwargs)
def prepare_cpu_model(model, **kwargs):
model.compile(**kwargs)
def make_model(model, **kwargs):
"""
Compile the Keras Model object for the given backend and machine type.
Use this function to write the Keras code once and run it across different machine types.
If the environment variable MXNET_KERAS_TEST_MACHINE is set to CPU, the model
is compiled for running on CPU.
If it is set to GPU, the model is compiled for running on GPU, using as many
GPUs as specified in the GPU_NUM environment variable.
Currently only MXNet is supported as the Keras backend.
"""
if(IS_GPU):
prepare_gpu_model(model, **kwargs)
else:
prepare_cpu_model(model, **kwargs)
return model
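# Minimal usage sketch (hypothetical model; assumes the environment variables
# described above are exported and the MXNet backend is active):
#
#   from keras.models import Sequential
#   from keras.layers.core import Dense
#
#   model = Sequential()
#   model.add(Dense(10, input_dim=784, activation='softmax'))
#   model = make_model(model, loss='categorical_crossentropy',
#                      optimizer='sgd', metrics=['accuracy'])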
| apache-2.0 |
jeffrey4l/nova | nova/api/openstack/compute/plugins/v3/server_diagnostics.py | 24 | 2604 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
ALIAS = "os-server-diagnostics"
authorize = extensions.os_compute_authorizer(ALIAS)
class ServerDiagnosticsController(wsgi.Controller):
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
@extensions.expected_errors((404, 409, 501))
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id)
try:
# NOTE(gmann): To keep the V21 API identical to V2, this method calls
# 'get_diagnostics' instead of 'get_instance_diagnostics'.
# In the future, 'get_instance_diagnostics' should be called so that
# VM diagnostics are provided in a defined format for all drivers.
# BP - https://blueprints.launchpad.net/nova/+spec/v3-diagnostics.
return self.compute_api.get_diagnostics(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'get_diagnostics', server_id)
except NotImplementedError:
common.raise_feature_not_supported()
class ServerDiagnostics(extensions.V3APIExtensionBase):
"""Allow Admins to view server diagnostics through server action."""
name = "ServerDiagnostics"
alias = ALIAS
version = 1
def get_resources(self):
parent_def = {'member_name': 'server', 'collection_name': 'servers'}
resources = [
extensions.ResourceExtension('diagnostics',
ServerDiagnosticsController(),
parent=parent_def)]
return resources
def get_controller_extensions(self):
return []
| apache-2.0 |
TheTypoMaster/chromium-crosswalk | third_party/jinja2/loaders.py | 255 | 17027 | # -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems
def split_template_path(template):
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in template.split('/'):
if path.sep in piece \
or (path.altsep and path.altsep in piece) or \
piece == path.pardir:
raise TemplateNotFound(template)
elif piece and piece != '.':
pieces.append(piece)
return pieces
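# For example (illustrative): split_template_path('admin/index.html') returns
# ['admin', 'index.html'], empty segments and '.' are dropped, and any piece
# equal to '..' (e.g. in 'admin/../secret') raises TemplateNotFound.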
class BaseLoader(object):
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
A very basic example for a loader that looks up templates on the file
system could look like this::
from jinja2 import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
with file(path) as f:
source = f.read().decode('utf-8')
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(self, environment, template):
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
template as a unicode string or an ASCII bytestring. The filename should
be the name of the file on the filesystem if it was loaded from there,
otherwise `None`. The filename is used by python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError('%s cannot provide access to the source' %
self.__class__.__name__)
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError('this loader cannot iterate over all templates')
@internalcode
def load(self, environment, name, globals=None):
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
will not call this method but `get_source` directly.
"""
code = None
if globals is None:
globals = {}
# first we try to get the source for this template together
# with the filename and the uptodate function.
source, filename, uptodate = self.get_source(environment, name)
# try to load the code from the bytecode cache if there is a
# bytecode cache configured.
bcc = environment.bytecode_cache
if bcc is not None:
bucket = bcc.get_bucket(environment, name, filename, source)
code = bucket.code
# if we don't have code so far (not cached, no longer up to
# date) etc. we compile the template
if code is None:
code = environment.compile(source, name, filename)
# if the bytecode cache is available and the bucket doesn't
# have a code so far, we give the bucket the new code and put
# it back to the bytecode cache.
if bcc is not None and bucket.code is None:
bucket.code = code
bcc.set_bucket(bucket)
return environment.template_class.from_code(environment, code,
globals, uptodate)
class FileSystemLoader(BaseLoader):
"""Loads templates from the file system. This loader can find templates
in folders on the file system and is the preferred way to load them.
The loader takes the path to the templates as string, or if multiple
locations are wanted a list of them which is then looked up in the
given order:
>>> loader = FileSystemLoader('/path/to/templates')
>>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else.
"""
def __init__(self, searchpath, encoding='utf-8'):
if isinstance(searchpath, string_types):
searchpath = [searchpath]
self.searchpath = list(searchpath)
self.encoding = encoding
def get_source(self, environment, template):
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
def list_templates(self):
found = set()
for searchpath in self.searchpath:
for dirpath, dirnames, filenames in os.walk(searchpath):
for filename in filenames:
template = os.path.join(dirpath, filename) \
[len(searchpath):].strip(os.path.sep) \
.replace(os.path.sep, '/')
if template[:2] == './':
template = template[2:]
if template not in found:
found.add(template)
return sorted(found)
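# Typical wiring (sketch): the loader is handed to an Environment, which then
# resolves template names through get_source() and list_templates():
#
#   from jinja2 import Environment
#   env = Environment(loader=FileSystemLoader('/path/to/templates'))
#   template = env.get_template('index.html')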
class PackageLoader(BaseLoader):
"""Load templates from python eggs or packages. It is constructed with
the name of the python package and the path to the templates in that
package::
loader = PackageLoader('mypackage', 'views')
If the package path is not given, ``'templates'`` is assumed.
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else. Due to the nature
of eggs it's only possible to reload templates if the package was loaded
from the file system and not a zip file.
"""
def __init__(self, package_name, package_path='templates',
encoding='utf-8'):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
self.filesystem_bound = isinstance(provider, DefaultProvider)
self.provider = provider
self.package_path = package_path
def get_source(self, environment, template):
pieces = split_template_path(template)
p = '/'.join((self.package_path,) + tuple(pieces))
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
filename = uptodate = None
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
source = self.provider.get_resource_string(self.manager, p)
return source.decode(self.encoding), filename, uptodate
def list_templates(self):
path = self.package_path
if path[:2] == './':
path = path[2:]
elif path == '.':
path = ''
offset = len(path)
results = []
def _walk(path):
for filename in self.provider.resource_listdir(path):
fullname = path + '/' + filename
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
results.append(fullname[offset:].lstrip('/'))
_walk(path)
results.sort()
return results
class DictLoader(BaseLoader):
"""Loads a template from a python dict. It's passed a dict of unicode
strings bound to template names. This loader is useful for unittesting:
>>> loader = DictLoader({'index.html': 'source here'})
Because auto reloading is rarely useful this is disabled per default.
"""
def __init__(self, mapping):
self.mapping = mapping
def get_source(self, environment, template):
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
def list_templates(self):
return sorted(self.mapping)
class FunctionLoader(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
a unicode string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
... if name == 'index.html':
... return '...'
...
>>> loader = FunctionLoader(load_template)
The `uptodatefunc` is a function that is called if autoreload is enabled
and has to return `True` if the template is still up to date. For more
details have a look at :meth:`BaseLoader.get_source` which has the same
return value.
"""
def __init__(self, load_func):
self.load_func = load_func
def get_source(self, environment, template):
rv = self.load_func(template)
if rv is None:
raise TemplateNotFound(template)
elif isinstance(rv, string_types):
return rv, None, None
return rv
class PrefixLoader(BaseLoader):
"""A loader that is passed a dict of loaders where each loader is bound
to a prefix. The prefix is delimited from the template by a slash per
default, which can be changed by setting the `delimiter` argument to
something else::
loader = PrefixLoader({
'app1': PackageLoader('mypackage.app1'),
'app2': PackageLoader('mypackage.app2')
})
By loading ``'app1/index.html'`` the file from the app1 package is loaded,
by loading ``'app2/index.html'`` the file from the second.
"""
def __init__(self, mapping, delimiter='/'):
self.mapping = mapping
self.delimiter = delimiter
def get_loader(self, template):
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
except (ValueError, KeyError):
raise TemplateNotFound(template)
return loader, name
def get_source(self, environment, template):
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(name)
def list_templates(self):
result = []
for prefix, loader in iteritems(self.mapping):
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
class ChoiceLoader(BaseLoader):
"""This loader works like the `PrefixLoader` just that no prefix is
specified. If a template could not be found by one loader the next one
is tried.
>>> loader = ChoiceLoader([
... FileSystemLoader('/path/to/user/templates'),
... FileSystemLoader('/path/to/system/templates')
... ])
This is useful if you want to allow users to override builtin templates
from a different location.
"""
def __init__(self, loaders):
self.loaders = loaders
def get_source(self, environment, template):
for loader in self.loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
except TemplateNotFound:
pass
raise TemplateNotFound(name)
def list_templates(self):
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
return sorted(found)
class _TemplateModule(ModuleType):
"""Like a normal module but with support for weak references"""
class ModuleLoader(BaseLoader):
"""This loader loads templates from precompiled templates.
Example usage:
>>> loader = ChoiceLoader([
... ModuleLoader('/path/to/compiled/templates'),
... FileSystemLoader('/path/to/templates')
... ])
Templates can be precompiled with :meth:`Environment.compile_templates`.
"""
has_source_access = False
def __init__(self, path):
package_name = '_jinja2_module_templates_%x' % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
if isinstance(path, string_types):
path = [path]
else:
path = list(path)
mod.__path__ = path
sys.modules[package_name] = weakref.proxy(mod,
lambda x: sys.modules.pop(package_name, None))
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
# loader that created it goes out of business.
self.module = mod
self.package_name = package_name
@staticmethod
def get_template_key(name):
return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
@staticmethod
def get_module_filename(name):
return ModuleLoader.get_template_key(name) + '.py'
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
module = '%s.%s' % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
mod = __import__(module, None, None, ['root'])
except ImportError:
raise TemplateNotFound(name)
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals)
| bsd-3-clause |
cchurch/ansible | lib/ansible/plugins/callback/counter_enabled.py | 23 | 10824 | # (c) 2018, Ivan Aragones Muniesa <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
Counter enabled Ansible callback plugin (See DOCUMENTATION for more information)
'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
from ansible.template import Templar
from ansible.playbook.task_include import TaskInclude
DOCUMENTATION = '''
callback: counter_enabled
type: stdout
short_description: adds counters to the output items (tasks and hosts/task)
version_added: "2.7"
description:
- Use this callback when you need a kind of progress bar in large environments.
- You will know how many tasks the playbook has to run, and which one is actually running.
- You will know how many hosts may run a task, and which of them is actually running.
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout callback in ansible.cfg (stdout_callback = counter_enabled)
'''
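# For reference (illustrative): the plugin is enabled from ansible.cfg, after
# which task banners carry a counter such as "TASK 3/12 [...]":
#
#   [defaults]
#   stdout_callback = counter_enabled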
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'counter_enabled'
_task_counter = 1
_task_total = 0
_host_counter = 1
_host_total = 0
def __init__(self):
super(CallbackModule, self).__init__()
self._playbook = ""
self._play = ""
def _all_vars(self, host=None, task=None):
# host and task need to be specified in case 'magic variables' (host vars, group vars, etc)
# need to be loaded as well
return self._play.get_variable_manager().get_vars(
play=self._play,
host=host,
task=task
)
def v2_playbook_on_start(self, playbook):
self._playbook = playbook
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if not name:
msg = u"play"
else:
msg = u"PLAY [%s]" % name
self._play = play
self._display.banner(msg)
self._play = play
self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all'])
self._task_total = len(self._play.get_tasks()[0])
def v2_playbook_on_stats(self, stats):
self._display.banner("PLAY RECAP")
hosts = sorted(stats.processed.keys())
for host in hosts:
stat = stats.summarize(host)
self._display.display(u"%s : %s %s %s %s %s %s" % (
hostcolor(host, stat),
colorize(u'ok', stat['ok'], C.COLOR_OK),
colorize(u'changed', stat['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', stat['failures'], C.COLOR_ERROR),
colorize(u'rescued', stat['rescued'], C.COLOR_OK),
colorize(u'ignored', stat['ignored'], C.COLOR_WARN)),
screen_only=True
)
self._display.display(u"%s : %s %s %s %s %s %s" % (
hostcolor(host, stat, False),
colorize(u'ok', stat['ok'], None),
colorize(u'changed', stat['changed'], None),
colorize(u'unreachable', stat['unreachable'], None),
colorize(u'failed', stat['failures'], None),
colorize(u'rescued', stat['rescued'], None),
colorize(u'ignored', stat['ignored'], None)),
log_only=True
)
self._display.display("", screen_only=True)
# print custom stats
if self._plugin_options.get('show_custom_stats', C.SHOW_CUSTOM_STATS) and stats.custom:
# fallback on constants for inherited plugins missing docs
self._display.banner("CUSTOM STATS: ")
# per host
# TODO: come up with 'pretty format'
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
self._display.display("", screen_only=True)
def v2_playbook_on_task_start(self, task, is_conditional):
args = ''
# args can be specified as no_log in several places: in the task or in
# the argument spec. We can check whether the task is no_log but the
# argument spec can't be because that is only run on the target
# machine and we haven't run it there yet at this time.
#
# So we give people a config option to affect display of the args so
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = ', '.join(('%s=%s' % a for a in task.args.items()))
args = ' %s' % args
self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args))
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
self._host_counter = 0
self._task_counter += 1
def v2_runner_on_ok(self, result):
self._host_counter += 1
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
if delegated_vars:
msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
color = C.COLOR_CHANGED
else:
if delegated_vars:
msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
color = C.COLOR_OK
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
self._clean_results(result._result, result._task.action)
if self._run_is_verbose(result):
msg += " => %s" % (self._dump_results(result._result),)
self._display.display(msg, color=color)
def v2_runner_on_failed(self, result, ignore_errors=False):
self._host_counter += 1
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._handle_exception(result._result)
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if delegated_vars:
self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total,
result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)),
color=C.COLOR_ERROR)
else:
self._display.display("fatal: %d/%d [%s]: FAILED! => %s" % (self._host_counter, self._host_total,
result._host.get_name(), self._dump_results(result._result)),
color=C.COLOR_ERROR)
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_runner_on_skipped(self, result):
self._host_counter += 1
if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
if self._run_is_verbose(result):
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
self._host_counter += 1
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)),
color=C.COLOR_UNREACHABLE)
else:
self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
result._host.get_name(), self._dump_results(result._result)),
color=C.COLOR_UNREACHABLE)
| gpl-3.0 |
jefftc/changlab | Betsy/Betsy/modules/plot_prediction.py | 1 | 2613 | from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
from genomicode import mplgraph
from genomicode import filelib
in_data = antecedents
matrix = [x for x in filelib.read_cols(in_data.identifier)]
header = matrix[0]
index = header.index('Confidence')
matrix = matrix[1:]
confidence = [float(i[index]) for i in matrix]
sample = [i[0] for i in matrix]
if confidence == [''] * len(matrix) or 'Correct?' in header:
index = header.index('Predicted_class')
class_value = [i[index] for i in matrix]
label_dict = dict()
label_list = []
i = -1
for label in class_value:
if label not in label_dict:
i = i + 1
label_dict[label] = i
label_list.append(label_dict[label])
yticks = label_dict.keys()
ytick_pos = [label_dict[i] for i in label_dict.keys()]
fig = mplgraph.barplot(label_list,
box_label=sample,
ylim=(-0.5, 1.5),
ytick_pos=ytick_pos,
yticks=yticks,
xtick_rotation='vertical',
ylabel='Prediction',
xlabel='Sample')
fig.savefig(outfile)
else:
fig = mplgraph.barplot(confidence,
box_label=sample,
ylim=(-1.5, 1.5),
xtick_rotation='vertical',
ylabel='Prediction',
xlabel='Sample')
fig.savefig(outfile)
assert filelib.exists_nz(outfile), (
'the output file %s for plot_prediction_bar was not generated' % outfile
)
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
original_file = module_utils.get_inputid(antecedents.identifier)
loocv = ''
if antecedents.data.attributes['loocv'] == 'yes':
loocv = 'loocv'
filename = ('prediction_' + original_file + '_' +
antecedents.data.attributes['classify_alg'] + loocv + '.png')
return filename
| mit |
CUCWD/edx-platform | common/lib/sandbox-packages/verifiers/tests_draganddrop.py | 24 | 32789 | import json
import unittest
import draganddrop
from .draganddrop import PositionsCompare
class Test_PositionsCompare(unittest.TestCase):
""" describe"""
def test_nested_list_and_list1(self):
self.assertEqual(PositionsCompare([[1, 2], 40]), PositionsCompare([1, 3]))
def test_nested_list_and_list2(self):
self.assertNotEqual(PositionsCompare([1, 12]), PositionsCompare([1, 1]))
def test_list_and_list1(self):
self.assertNotEqual(PositionsCompare([[1, 2], 12]), PositionsCompare([1, 15]))
def test_list_and_list2(self):
self.assertEqual(PositionsCompare([1, 11]), PositionsCompare([1, 1]))
def test_numerical_list_and_string_list(self):
self.assertNotEqual(PositionsCompare([1, 2]), PositionsCompare(["1"]))
def test_string_and_string_list1(self):
self.assertEqual(PositionsCompare("1"), PositionsCompare(["1"]))
def test_string_and_string_list2(self):
self.assertEqual(PositionsCompare("abc"), PositionsCompare("abc"))
def test_string_and_string_list3(self):
self.assertNotEqual(PositionsCompare("abd"), PositionsCompare("abe"))
def test_float_and_string(self):
self.assertNotEqual(PositionsCompare([3.5, 5.7]), PositionsCompare(["1"]))
def test_floats_and_ints(self):
self.assertEqual(PositionsCompare([3.5, 4.5]), PositionsCompare([5, 7]))
class Test_DragAndDrop_Grade(unittest.TestCase):
def test_targets_are_draggable_1(self):
user_input = json.dumps([
{'p': 'p_l'},
{'up': {'first': {'p': 'p_l'}}}
])
correct_answer = [
{
'draggables': ['p'],
'targets': ['p_l', 'p_r'],
'rule': 'anyof'
},
{
'draggables': ['up'],
'targets': [
'p_l[p][first]'
],
'rule': 'anyof'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_2(self):
user_input = json.dumps([
{'p': 'p_l'},
{'p': 'p_r'},
{'s': 's_l'},
{'s': 's_r'},
{'up': {'1': {'p': 'p_l'}}},
{'up': {'3': {'p': 'p_l'}}},
{'up': {'1': {'p': 'p_r'}}},
{'up': {'3': {'p': 'p_r'}}},
{'up_and_down': {'1': {'s': 's_l'}}},
{'up_and_down': {'1': {'s': 's_r'}}}
])
correct_answer = [
{
'draggables': ['p'],
'targets': ['p_l', 'p_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['s'],
'targets': ['s_l', 's_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': ['s_l[s][1]', 's_r[s][1]'],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'p_l[p][1]',
'p_l[p][3]',
'p_r[p][1]',
'p_r[p][3]',
],
'rule': 'unordered_equal'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_2_manual_parsing(self):
user_input = json.dumps([
{'up': 'p_l[p][1]'},
{'p': 'p_l'},
{'up': 'p_l[p][3]'},
{'up': 'p_r[p][1]'},
{'p': 'p_r'},
{'up': 'p_r[p][3]'},
{'up_and_down': 's_l[s][1]'},
{'s': 's_l'},
{'up_and_down': 's_r[s][1]'},
{'s': 's_r'}
])
correct_answer = [
{
'draggables': ['p'],
'targets': ['p_l', 'p_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['s'],
'targets': ['s_l', 's_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': ['s_l[s][1]', 's_r[s][1]'],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'p_l[p][1]',
'p_l[p][3]',
'p_r[p][1]',
'p_r[p][3]',
],
'rule': 'unordered_equal'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_3_nested(self):
user_input = json.dumps([
{'molecule': 'left_side_tagret'},
{'molecule': 'right_side_tagret'},
{'p': {'p_target': {'molecule': 'left_side_tagret'}}},
{'p': {'p_target': {'molecule': 'right_side_tagret'}}},
{'s': {'s_target': {'molecule': 'left_side_tagret'}}},
{'s': {'s_target': {'molecule': 'right_side_tagret'}}},
{'up': {'1': {'p': {'p_target': {'molecule': 'left_side_tagret'}}}}},
{'up': {'3': {'p': {'p_target': {'molecule': 'left_side_tagret'}}}}},
{'up': {'1': {'p': {'p_target': {'molecule': 'right_side_tagret'}}}}},
{'up': {'3': {'p': {'p_target': {'molecule': 'right_side_tagret'}}}}},
{'up_and_down': {'1': {'s': {'s_target': {'molecule': 'left_side_tagret'}}}}},
{'up_and_down': {'1': {'s': {'s_target': {'molecule': 'right_side_tagret'}}}}}
])
correct_answer = [
{
'draggables': ['molecule'],
'targets': ['left_side_tagret', 'right_side_tagret'],
'rule': 'unordered_equal'
},
{
'draggables': ['p'],
'targets': [
'left_side_tagret[molecule][p_target]',
'right_side_tagret[molecule][p_target]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['s'],
'targets': [
'left_side_tagret[molecule][s_target]',
'right_side_tagret[molecule][s_target]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': [
'left_side_tagret[molecule][s_target][s][1]',
'right_side_tagret[molecule][s_target][s][1]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'left_side_tagret[molecule][p_target][p][1]',
'left_side_tagret[molecule][p_target][p][3]',
'right_side_tagret[molecule][p_target][p][1]',
'right_side_tagret[molecule][p_target][p][3]',
],
'rule': 'unordered_equal'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_4_real_example(self):
user_input = json.dumps([
{'single_draggable': 's_l'},
{'single_draggable': 's_r'},
{'single_draggable': 'p_sigma'},
{'single_draggable': 'p_sigma*'},
{'single_draggable': 's_sigma'},
{'single_draggable': 's_sigma*'},
{'double_draggable': 'p_pi*'},
{'double_draggable': 'p_pi'},
{'triple_draggable': 'p_l'},
{'triple_draggable': 'p_r'},
{'up': {'1': {'triple_draggable': 'p_l'}}},
{'up': {'2': {'triple_draggable': 'p_l'}}},
{'up': {'2': {'triple_draggable': 'p_r'}}},
{'up': {'3': {'triple_draggable': 'p_r'}}},
{'up_and_down': {'1': {'single_draggable': 's_l'}}},
{'up_and_down': {'1': {'single_draggable': 's_r'}}},
{'up_and_down': {'1': {'single_draggable': 's_sigma'}}},
{'up_and_down': {'1': {'single_draggable': 's_sigma*'}}},
{'up_and_down': {'1': {'double_draggable': 'p_pi'}}},
{'up_and_down': {'2': {'double_draggable': 'p_pi'}}}
])
# 10 targets:
# s_l, s_r, p_l, p_r, s_sigma, s_sigma*, p_pi, p_sigma, p_pi*, p_sigma*
#
# 3 draggable objects, which have targets (internal target ids - 1, 2, 3):
# single_draggable, double_draggable, triple_draggable
#
# 2 draggable objects:
# up, up_and_down
correct_answer = [
{
'draggables': ['triple_draggable'],
'targets': ['p_l', 'p_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['double_draggable'],
'targets': ['p_pi', 'p_pi*'],
'rule': 'unordered_equal'
},
{
'draggables': ['single_draggable'],
'targets': ['s_l', 's_r', 's_sigma', 's_sigma*', 'p_sigma', 'p_sigma*'],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'p_l[triple_draggable][1]',
'p_l[triple_draggable][2]',
'p_r[triple_draggable][2]',
'p_r[triple_draggable][3]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': [
's_l[single_draggable][1]',
's_r[single_draggable][1]',
's_sigma[single_draggable][1]',
's_sigma*[single_draggable][1]',
'p_pi[double_draggable][1]',
'p_pi[double_draggable][2]',
],
'rule': 'unordered_equal'
},
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_true(self):
user_input = '[{"1": "t1"}, \
{"name_with_icon": "t2"}]'
correct_answer = {'1': 't1', 'name_with_icon': 't2'}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_expect_no_actions_wrong(self):
user_input = '[{"1": "t1"}, \
{"name_with_icon": "t2"}]'
correct_answer = []
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_expect_no_actions_right(self):
user_input = '[]'
correct_answer = []
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_false(self):
user_input = '[{"1": "t1"}, \
{"name_with_icon": "t2"}]'
correct_answer = {'1': 't3', 'name_with_icon': 't2'}
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_multiple_images_per_target_true(self):
user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}, \
{"2": "t1"}]'
correct_answer = {'1': 't1', 'name_with_icon': 't2', '2': 't1'}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_multiple_images_per_target_false(self):
user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}, \
{"2": "t1"}]'
correct_answer = {'1': 't2', 'name_with_icon': 't2', '2': 't1'}
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_targets_and_positions(self):
user_input = '[{"1": [10,10]}, \
{"name_with_icon": [[10,10],4]}]'
correct_answer = {'1': [10, 10], 'name_with_icon': [[10, 10], 4]}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_position_and_targets(self):
user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}]'
correct_answer = {'1': 't1', 'name_with_icon': 't2'}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_positions_exact(self):
user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
correct_answer = {'1': [10, 10], 'name_with_icon': [20, 20]}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_positions_false(self):
user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
correct_answer = {'1': [25, 25], 'name_with_icon': [20, 20]}
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_positions_true_in_radius(self):
user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
correct_answer = {'1': [14, 14], 'name_with_icon': [20, 20]}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_positions_true_in_manual_radius(self):
user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
correct_answer = {'1': [[40, 10], 30], 'name_with_icon': [20, 20]}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_positions_false_in_manual_radius(self):
user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
correct_answer = {'1': [[40, 10], 29], 'name_with_icon': [20, 20]}
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_correct_answer_not_has_key_from_user_answer(self):
user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}]'
correct_answer = {'3': 't3', 'name_with_icon': 't2'}
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_anywhere(self):
"""Draggables can be places anywhere on base image.
Place grass in the middle of the image and ant in the
right upper corner."""
user_input = '[{"ant":[610.5,57.449951171875]},\
{"grass":[322.5,199.449951171875]}]'
correct_answer = {'grass': [[300, 200], 200], 'ant': [[500, 0], 200]}
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_lcao_correct(self):
"""Describe carbon molecule in LCAO-MO"""
user_input = '[{"1":"s_left"}, \
{"5":"s_right"},{"4":"s_sigma"},{"6":"s_sigma_star"},{"7":"p_left_1"}, \
{"8":"p_left_2"},{"10":"p_right_1"},{"9":"p_right_2"}, \
{"2":"p_pi_1"},{"3":"p_pi_2"},{"11":"s_sigma_name"}, \
{"13":"s_sigma_star_name"},{"15":"p_pi_name"},{"16":"p_pi_star_name"}, \
{"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]'
correct_answer = [{
'draggables': ['1', '2', '3', '4', '5', '6'],
'targets': [
's_left', 's_right', 's_sigma', 's_sigma_star', 'p_pi_1', 'p_pi_2'
],
'rule': 'anyof'
}, {
'draggables': ['7', '8', '9', '10'],
'targets': ['p_left_1', 'p_left_2', 'p_right_1', 'p_right_2'],
'rule': 'anyof'
}, {
'draggables': ['11', '12'],
'targets': ['s_sigma_name', 'p_sigma_name'],
'rule': 'anyof'
}, {
'draggables': ['13', '14'],
'targets': ['s_sigma_star_name', 'p_sigma_star_name'],
'rule': 'anyof'
}, {
'draggables': ['15'],
'targets': ['p_pi_name'],
'rule': 'anyof'
}, {
'draggables': ['16'],
'targets': ['p_pi_star_name'],
'rule': 'anyof'
}]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_lcao_extra_element_incorrect(self):
"""Describe carbon molecule in LCAO-MO"""
user_input = '[{"1":"s_left"}, \
{"5":"s_right"},{"4":"s_sigma"},{"6":"s_sigma_star"},{"7":"p_left_1"}, \
{"8":"p_left_2"},{"17":"p_left_3"},{"10":"p_right_1"},{"9":"p_right_2"}, \
{"2":"p_pi_1"},{"3":"p_pi_2"},{"11":"s_sigma_name"}, \
{"13":"s_sigma_star_name"},{"15":"p_pi_name"},{"16":"p_pi_star_name"}, \
{"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]'
correct_answer = [{
'draggables': ['1', '2', '3', '4', '5', '6'],
'targets': [
's_left', 's_right', 's_sigma', 's_sigma_star', 'p_pi_1', 'p_pi_2'
],
'rule': 'anyof'
}, {
'draggables': ['7', '8', '9', '10'],
'targets': ['p_left_1', 'p_left_2', 'p_right_1', 'p_right_2'],
'rule': 'anyof'
}, {
'draggables': ['11', '12'],
'targets': ['s_sigma_name', 'p_sigma_name'],
'rule': 'anyof'
}, {
'draggables': ['13', '14'],
'targets': ['s_sigma_star_name', 'p_sigma_star_name'],
'rule': 'anyof'
}, {
'draggables': ['15'],
'targets': ['p_pi_name'],
'rule': 'anyof'
}, {
'draggables': ['16'],
'targets': ['p_pi_star_name'],
'rule': 'anyof'
}]
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_reuse_draggable_no_mupliples(self):
"""Test reusable draggables (no mupltiple draggables per target)"""
user_input = '[{"1":"target1"}, \
{"2":"target2"},{"1":"target3"},{"2":"target4"},{"2":"target5"}, \
{"3":"target6"}]'
correct_answer = [
{
'draggables': ['1'],
'targets': ['target1', 'target3'],
'rule': 'anyof'
},
{
'draggables': ['2'],
'targets': ['target2', 'target4', 'target5'],
'rule': 'anyof'
},
{
'draggables': ['3'],
'targets': ['target6'],
'rule': 'anyof'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_reuse_draggable_with_mupliples(self):
"""Test reusable draggables with mupltiple draggables per target"""
user_input = '[{"1":"target1"}, \
{"2":"target2"},{"1":"target1"},{"2":"target4"},{"2":"target4"}, \
{"3":"target6"}]'
correct_answer = [
{
'draggables': ['1'],
'targets': ['target1', 'target3'],
'rule': 'anyof'
},
{
'draggables': ['2'],
'targets': ['target2', 'target4'],
'rule': 'anyof'
},
{
'draggables': ['3'],
'targets': ['target6'],
'rule': 'anyof'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_reuse_many_draggable_with_mupliples(self):
"""Test reusable draggables with mupltiple draggables per target"""
user_input = '[{"1":"target1"}, \
{"2":"target2"},{"1":"target1"},{"2":"target4"},{"2":"target4"}, \
{"3":"target6"}, {"4": "target3"}, {"5": "target4"}, \
{"5": "target5"}, {"6": "target2"}]'
correct_answer = [
{
'draggables': ['1', '4'],
'targets': ['target1', 'target3'],
'rule': 'anyof'
},
{
'draggables': ['2', '6'],
'targets': ['target2', 'target4'],
'rule': 'anyof'
},
{
'draggables': ['5'],
'targets': ['target4', 'target5'],
'rule': 'anyof'
},
{
'draggables': ['3'],
'targets': ['target6'],
'rule': 'anyof'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_reuse_many_draggable_with_mupliples_wrong(self):
"""Test reusable draggables with mupltiple draggables per target"""
user_input = '[{"1":"target1"}, \
{"2":"target2"},{"1":"target1"}, \
{"2":"target3"}, \
{"2":"target4"}, \
{"3":"target6"}, {"4": "target3"}, {"5": "target4"}, \
{"5": "target5"}, {"6": "target2"}]'
correct_answer = [
{
'draggables': ['1', '4'],
'targets': ['target1', 'target3'],
'rule': 'anyof'
},
{
'draggables': ['2', '6'],
'targets': ['target2', 'target4'],
'rule': 'anyof'
},
{
'draggables': ['5'],
'targets': ['target4', 'target5'],
'rule': 'anyof'
},
{
'draggables': ['3'],
'targets': ['target6'],
'rule': 'anyof'
}]
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_label_10_targets_with_a_b_c_false(self):
"""Test reusable draggables (no mupltiple draggables per target)"""
user_input = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \
{"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
{"a":"target1"}]'
correct_answer = [
{
'draggables': ['a'],
'targets': ['target1', 'target4', 'target7', 'target10'],
'rule': 'unordered_equal'
},
{
'draggables': ['b'],
'targets': ['target2', 'target5', 'target8'],
'rule': 'unordered_equal'
},
{
'draggables': ['c'],
'targets': ['target3', 'target6', 'target9'],
'rule': 'unordered_equal'
}
]
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_label_10_targets_with_a_b_c_(self):
"""Test reusable draggables (no mupltiple draggables per target)"""
user_input = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \
{"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
{"a":"target10"}]'
correct_answer = [
{
'draggables': ['a'],
'targets': ['target1', 'target4', 'target7', 'target10'],
'rule': 'unordered_equal'
},
{
'draggables': ['b'],
'targets': ['target2', 'target5', 'target8'],
'rule': 'unordered_equal'
},
{
'draggables': ['c'],
'targets': ['target3', 'target6', 'target9'],
'rule': 'unordered_equal'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_label_10_targets_with_a_b_c_multiple(self):
"""Test reusable draggables (mupltiple draggables per target)"""
user_input = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"b":"target5"}, \
{"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
{"a":"target1"}]'
correct_answer = [
{
'draggables': ['a', 'a', 'a'],
'targets': ['target1', 'target4', 'target7', 'target10'],
'rule': 'anyof+number'
},
{
'draggables': ['b', 'b', 'b'],
'targets': ['target2', 'target5', 'target8'],
'rule': 'anyof+number'
},
{
'draggables': ['c', 'c', 'c'],
'targets': ['target3', 'target6', 'target9'],
'rule': 'anyof+number'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_label_10_targets_with_a_b_c_multiple_false(self):
"""Test reusable draggables (mupltiple draggables per target)"""
user_input = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \
{"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
{"a":"target1"}]'
correct_answer = [
{
'draggables': ['a', 'a', 'a'],
'targets': ['target1', 'target4', 'target7', 'target10'],
'rule': 'anyof+number'
},
{
'draggables': ['b', 'b', 'b'],
'targets': ['target2', 'target5', 'target8'],
'rule': 'anyof+number'
},
{
'draggables': ['c', 'c', 'c'],
'targets': ['target3', 'target6', 'target9'],
'rule': 'anyof+number'
}
]
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_label_10_targets_with_a_b_c_reused(self):
"""Test a b c in 10 labels reused"""
user_input = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"b":"target5"}, \
{"c":"target6"}, {"b":"target8"},{"c":"target9"}, \
{"a":"target10"}]'
correct_answer = [
{
'draggables': ['a', 'a'],
'targets': ['target1', 'target10'],
'rule': 'unordered_equal+number'
},
{
'draggables': ['b', 'b', 'b'],
'targets': ['target2', 'target5', 'target8'],
'rule': 'unordered_equal+number'
},
{
'draggables': ['c', 'c', 'c'],
'targets': ['target3', 'target6', 'target9'],
'rule': 'unordered_equal+number'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_label_10_targets_with_a_b_c_reused_false(self):
"""Test a b c in 10 labels reused false"""
user_input = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"},{"b":"target5"}, {"a":"target8"},\
{"c":"target6"}, {"b":"target8"},{"c":"target9"}, \
{"a":"target10"}]'
correct_answer = [
{
'draggables': ['a', 'a'],
'targets': ['target1', 'target10'],
'rule': 'unordered_equal+number'
},
{
'draggables': ['b', 'b', 'b'],
'targets': ['target2', 'target5', 'target8'],
'rule': 'unordered_equal+number'
},
{
'draggables': ['c', 'c', 'c'],
'targets': ['target3', 'target6', 'target9'],
'rule': 'unordered_equal+number'
}
]
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_mixed_reuse_and_not_reuse(self):
"""Test reusable draggables """
user_input = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"}, {"a":"target4"},\
{"a":"target5"}]'
correct_answer = [
{
'draggables': ['a', 'b'],
'targets': ['target1', 'target2', 'target4', 'target5'],
'rule': 'anyof'
},
{
'draggables': ['c'],
'targets': ['target3'],
'rule': 'exact'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_mixed_reuse_and_not_reuse_number(self):
"""Test reusable draggables with number """
user_input = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"}, {"a":"target4"}]'
correct_answer = [
{
'draggables': ['a', 'a', 'b'],
'targets': ['target1', 'target2', 'target4'],
'rule': 'anyof+number'
},
{
'draggables': ['c'],
'targets': ['target3'],
'rule': 'exact'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_mixed_reuse_and_not_reuse_number_false(self):
"""Test reusable draggables with numbers, but wrong"""
user_input = '[{"a":"target1"}, \
{"b":"target2"},{"c":"target3"}, {"a":"target4"}, {"a":"target10"}]'
correct_answer = [
{
'draggables': ['a', 'a', 'b'],
'targets': ['target1', 'target2', 'target4', 'target10'],
'rule': 'anyof_number'
},
{
'draggables': ['c'],
'targets': ['target3'],
'rule': 'exact'
}
]
self.assertFalse(draganddrop.grade(user_input, correct_answer))
def test_alternative_correct_answer(self):
user_input = '[{"name_with_icon":"t1"},\
{"name_with_icon":"t1"},{"name_with_icon":"t1"},{"name4":"t1"}, \
{"name4":"t1"}]'
correct_answer = [
{'draggables': ['name4'], 'targets': ['t1', 't1'], 'rule': 'exact'},
{'draggables': ['name_with_icon'], 'targets': ['t1', 't1', 't1'],
'rule': 'exact'}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
class Test_DragAndDrop_Populate(unittest.TestCase):
def test_1(self):
correct_answer = {'1': [[40, 10], 29], 'name_with_icon': [20, 20]}
user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
dnd = draganddrop.DragAndDrop(correct_answer, user_input)
correct_groups = [['1'], ['name_with_icon']]
correct_positions = [{'exact': [[[40, 10], 29]]}, {'exact': [[20, 20]]}]
user_groups = [['1'], ['name_with_icon']]
user_positions = [{'user': [[10, 10]]}, {'user': [[20, 20]]}]
self.assertEqual(correct_groups, dnd.correct_groups)
self.assertEqual(correct_positions, dnd.correct_positions)
self.assertEqual(user_groups, dnd.user_groups)
self.assertEqual(user_positions, dnd.user_positions)
class Test_DragAndDrop_Compare_Positions(unittest.TestCase):
def test_1(self):
dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
self.assertTrue(dnd.compare_positions(correct=[[1, 1], [2, 3]],
user=[[2, 3], [1, 1]],
flag='anyof'))
def test_2a(self):
dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
self.assertTrue(dnd.compare_positions(correct=[[1, 1], [2, 3]],
user=[[2, 3], [1, 1]],
flag='exact'))
def test_2b(self):
dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
self.assertFalse(dnd.compare_positions(correct=[[1, 1], [2, 3]],
user=[[2, 13], [1, 1]],
flag='exact'))
def test_3(self):
dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
self.assertFalse(dnd.compare_positions(correct=["a", "b"],
user=["a", "b", "c"],
flag='anyof'))
def test_4(self):
dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
self.assertTrue(dnd.compare_positions(correct=["a", "b", "c"],
user=["a", "b"],
flag='anyof'))
def test_5(self):
dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
self.assertFalse(dnd.compare_positions(correct=["a", "b", "c"],
user=["a", "c", "b"],
flag='exact'))
def test_6(self):
dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
self.assertTrue(dnd.compare_positions(correct=["a", "b", "c"],
user=["a", "c", "b"],
flag='anyof'))
def test_7(self):
dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
self.assertFalse(dnd.compare_positions(correct=["a", "b", "b"],
user=["a", "c", "b"],
flag='anyof'))
def suite():
testcases = [Test_PositionsCompare,
Test_DragAndDrop_Populate,
Test_DragAndDrop_Grade,
Test_DragAndDrop_Compare_Positions]
suites = []
for testcase in testcases:
suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase))
return unittest.TestSuite(suites)
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(suite())
| agpl-3.0 |
caelan/stripstream | stripstream/utils.py | 1 | 3736 | import os
import random
import shutil
import pickle
from random import shuffle
INF = float('inf')
SEPARATOR = '\n' + 85 * '-' + '\n'
def separator(n=85):
return '\n' + n * '-' + '\n'
def header(s, n=10):
return '\n' + n * '-' + s + n * '-' + '\n'
def set_deterministic(seed=0):
random.seed(seed)
import numpy
numpy.random.seed(seed)
def implies(a, b):
return not a or b
def flatten(iterable_of_iterables):
return (item for iterables in iterable_of_iterables for item in iterables)
def irange(start, end, step=1):
n = start
while n < end:
yield n
n += step
def argmin(function, sequence):
values = list(sequence)
scores = [function(x) for x in values]
return values[scores.index(min(scores))]
def first(function, iterable):
for item in iterable:
if function(item):
return item
return None
def random_sequence(sequence):
indices = range(len(sequence))
shuffle(indices)
for i in indices:
yield sequence[i]
def read(filename):
with open(filename, 'r') as f:
return f.read()
def write(filename, string):
with open(filename, 'w') as f:
f.write(string)
def write_pickle(filename, data):
with open(filename, 'wb') as f:
pickle.dump(data, f)
def read_pickle(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def safe_remove(p):
if os.path.exists(p):
os.remove(p)
def remove_dir(d):
if os.path.exists(d):
shutil.rmtree(d)
GRAPH_VIZ_COLORS = ['AntiqueWhite', 'Aquamarine', 'Beige', 'Bisque', 'Black', 'BlanchedAlmond', 'Blue', 'BlueViolet',
'Brown', 'Burlywood', 'CadetBlue', 'Chartreuse', 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk',
'Crimson', 'Cyan', 'DarkBlue', 'DarkCyan', 'DarkGoldenrod', 'DarkGray', 'DarkGreen', 'DarkKhaki',
'DarkMagenta', 'DarkOliveGreen', 'DarkOrange', 'DarkOrchid', 'DarkRed', 'DarkSalmon', 'DarkSeaGreen',
'DarkSlateBlue', 'DarkSlateGray', 'DarkTurquoise', 'DarkViolet', 'DeepPink', 'DeepSkyBlue', 'DimGray',
'DodgerBlue', 'Firebrick', 'ForestGreen', 'Fuchsia', 'Gainsboro', 'Gold', 'Goldenrod', 'Gray', 'Green',
'GreenYellow', 'HotPink', 'IndianRed', 'Indigo', 'Khaki', 'Lavender', 'LavenderBlush', 'LawnGreen',
'LemonChiffon', 'LightBlue', 'LightCoral', 'LightCyan', 'LightGoldenrodYellow', 'LightGray',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue', 'LightSlateGray',
'LightSteelBlue', 'Lime', 'LimeGreen', 'Magenta', 'Maroon', 'MediumAquamarine', 'MediumBlue',
'MediumOrchid', 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MidnightBlue', 'MistyRose', 'Moccasin', 'NavajoWhite',
'Navy', 'OldLace', 'OliveDrab', 'Orange', 'OrangeRed', 'Orchid', 'PaleGoldenrod', 'PaleGreen',
'PaleTurquoise', 'PaleVioletRed', 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue',
'Purple', 'Red', 'RosyBrown', 'RoyalBlue', 'RoyalBlue', 'Salmon', 'SandyBrown', 'SeaGreen', 'Sienna',
'Silver', 'SkyBlue', 'SlateBlue', 'SlateGray', 'SpringGreen', 'SteelBlue', 'Tan', 'Teal', 'Thistle',
'Tomato', 'Turquoise', 'Violet', 'Wheat', 'Yellow', 'YellowGreen']
GRAPH_VIZ_SHAPES = ['box', 'oval', 'square', 'circle']
def set_union(sequence):
return set().union(*sequence)
| mit |
ethanrowe/python-mandrel | mandrel/test/util/file_finder_test.py | 1 | 2891 | import contextlib
import mock
import os
import unittest
from mandrel.test import utils
from mandrel import util
@contextlib.contextmanager
def scenario(**files_to_levels):
levels = []
with utils.tempdir() as a:
levels.append(a)
with utils.tempdir() as b:
levels.append(b)
with utils.tempdir() as c:
levels.append(c)
for name, dirs in files_to_levels.items():
for level in dirs:
with open(os.path.join(levels[level], name), 'w') as f:
f.write(str(level))
with utils.bootstrap_scenario() as spec:
with mock.patch('mandrel.bootstrap.SEARCH_PATHS', new=levels):
yield levels
def get_level(path):
with open(path, 'r') as f:
return int(f.read())
class TestFileFinder(unittest.TestCase):
def testSingleFindOneMatch(self):
with scenario(**{'a.txt': (0, 1, 2), 'b.foo': (1, 2), 'c.bar': (2,)}) as dirs:
for name, level in {'a.txt': 0, 'b.foo': 1, 'c.bar': 2}.items():
result = tuple(util.find_files(name, dirs, matches=1))
self.assertEqual(1, len(result))
self.assertEqual(level, get_level(result[0]))
def testSingleFindTwoMatch(self):
with scenario(**{'0.x': (0,), 'a.txt': (0, 1, 2), 'b.foo': (1, 2), 'c.bar': (2,)}) as dirs:
for name, levels in {'0.x': (0,), 'a.txt': (0, 1), 'b.foo': (1, 2), 'c.bar': (2,)}.items():
got = tuple(get_level(r) for r in util.find_files(name, dirs, matches=2))
self.assertEqual(levels, got)
def testSingleFindMultiMatch(self):
mapping = {'0.x': (0,), 'a.txt': (0, 1), 'b.blah': (0, 1, 2), 'c.pork': (1, 2), 'd.plonk': (1,), 'e.sporks': (2,)}
with scenario(**mapping) as dirs:
for name, levels in mapping.items():
got = tuple(get_level(r) for r in util.find_files(name, dirs))
self.assertEqual(levels, got)
def testMultiFind(self):
normalize = lambda f: (os.path.basename(f), get_level(f))
with scenario(a=(0, 1, 2), b=(0, 1, 2), c=(1,), d=(0, 2), e=()) as dirs:
self.assertEqual(
[('a', 0), ('a', 1), ('a', 2)],
[normalize(r) for r in util.find_files(('a', 'b'), dirs)])
self.assertEqual(
[('b', 0), ('b', 1), ('b', 2)],
[normalize(r) for r in util.find_files(('b', 'a'), dirs)])
self.assertEqual(
[('a', 0), ('c', 1), ('a', 2)],
[normalize(r) for r in util.find_files(('c', 'a'), dirs)])
self.assertEqual(
[('d', 0), ('c', 1), ('d', 2)],
[normalize(r) for r in util.find_files(('e', 'd', 'c', 'a', 'b'), dirs)])
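# Hedged summary (assumption, inferred only from the assertions above):
# util.find_files(names, dirs, matches=N) walks `dirs` in order, yields the
# first of `names` present in each directory, and stops after N matches
# (or only when the directories are exhausted if `matches` is omitted).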
| mit |
sunsettrack4/android_kernel_oneplus_msm8996 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some event type classes, which can
# be used by other scripts for analyzing perf samples.
#
# Currently there are just a few classes defined as examples:
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a HW-based Intel x86 PEBS event, and users could add more SW/HW
# event classes based on their requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type other than by
# the size of the raw buffer: a raw PEBS event with load latency data is
# 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
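# Hedged usage sketch (assumption, not part of the original library): a perf
# script's sample handler would typically call
#     ev = create_event(name, comm, dso, symbol, raw_buf)
#     ev.show()
# relying on the dispatch above: len(raw_buf) == 144 -> PebsEvent,
# len(raw_buf) == 176 -> PebsNHM, anything else -> plain PerfEvent.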
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
# in the four 64-bit words written after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
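# Hedged note (assumption, not part of the original library): the load
# latency quadwords decoded above live at raw_buf[144:176], i.e. directly
# after the 144-byte basic PEBS record, which is why create_event() can key
# its dispatch on the raw buffer length alone.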
| gpl-2.0 |
Russell-IO/ansible | lib/ansible/module_utils/network/ios/ios.py | 16 | 5663 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection
_DEVICE_CONFIGS = {}
ios_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
'timeout': dict(type='int')
}
ios_argument_spec = {
'provider': dict(type='dict', options=ios_provider_spec),
}
ios_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(removed_in_version=2.9, no_log=True),
'timeout': dict(removed_in_version=2.9, type='int')
}
ios_argument_spec.update(ios_top_spec)
def get_provider_argspec():
return ios_provider_spec
def get_connection(module):
if hasattr(module, '_ios_connection'):
return module._ios_connection
capabilities = get_capabilities(module)
network_api = capabilities.get('network_api')
if network_api == 'cliconf':
module._ios_connection = Connection(module._socket_path)
else:
module.fail_json(msg='Invalid connection type %s' % network_api)
return module._ios_connection
def get_capabilities(module):
if hasattr(module, '_ios_capabilities'):
return module._ios_capabilities
capabilities = Connection(module._socket_path).get_capabilities()
module._ios_capabilities = json.loads(capabilities)
return module._ios_capabilities
def check_args(module, warnings):
pass
def get_defaults_flag(module):
connection = get_connection(module)
out = connection.get('show running-config ?')
out = to_text(out, errors='surrogate_then_replace')
commands = set()
for line in out.splitlines():
if line.strip():
commands.add(line.strip().split()[0])
if 'all' in commands:
return ['all']
else:
return ['full']
def get_config(module, flags=None):
flag_str = ' '.join(to_list(flags))
try:
return _DEVICE_CONFIGS[flag_str]
except KeyError:
connection = get_connection(module)
out = connection.get_config(flags=flags)
cfg = to_text(out, errors='surrogate_then_replace').strip()
_DEVICE_CONFIGS[flag_str] = cfg
return cfg
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
connection = get_connection(module)
for cmd in to_list(commands):
if isinstance(cmd, dict):
command = cmd['command']
prompt = cmd['prompt']
answer = cmd['answer']
else:
command = cmd
prompt = None
answer = None
out = connection.get(command, prompt, answer)
try:
out = to_text(out, errors='surrogate_or_strict')
except UnicodeError:
module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
responses.append(out)
return responses
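# Hedged usage sketch (assumption, not part of the original module): callers
# may mix plain strings and prompt/answer dicts in a single call, e.g.
#     run_commands(module, ['show version',
#                           {'command': 'clear counters',
#                            'prompt': 'confirm', 'answer': 'y'}])
# Each dict entry is passed to Connection.get() as (command, prompt, answer);
# the command list here is purely illustrative.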
def load_config(module, commands):
connection = get_connection(module)
out = connection.edit_config(commands)
| gpl-3.0 |
saumishr/django | tests/regressiontests/urlpatterns_reverse/tests.py | 22 | 28501 | """
Unit tests for reverse URL lookups.
"""
from __future__ import absolute_import
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.core.urlresolvers import (reverse, resolve, NoReverseMatch,
Resolver404, ResolverMatch, RegexURLResolver, RegexURLPattern)
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import redirect
from django.test import TestCase
from django.utils import unittest
from django.contrib.auth.models import User
from . import urlconf_outer, urlconf_inner, middleware, views
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/view_class/42/37/', 'view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/normal/42/37/', 'inc-normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/view_class/42/37/', 'inc-view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
('/included/mixed_args/42/37/', 'inc-mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
# Unnamed views will be resolved to the function/class name
('/unnamed/normal/42/37/', 'regressiontests.urlpatterns_reverse.views.empty_view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/unnamed/view_class/42/37/', 'regressiontests.urlpatterns_reverse.views.ViewClass', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', None, '', views.empty_view, ('42','37'), {}),
('/included/no_kwargs/42/37/', 'inc-no-kwargs', None, '', views.empty_view, ('42','37'), {}),
# Namespaces
('/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/normal/42/37/', 'inc-normal-view', None, 'inc-ns1', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
# Nested namespaces
('/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
# Namespaces capturing variables
('/inc70/', 'inner-nothing', None, 'inc-ns5', views.empty_view, tuple(), {'outer': '70'}),
('/inc78/extra/foobar/', 'inner-extra', None, 'inc-ns5', views.empty_view, tuple(), {'outer':'78', 'extra':'foobar'}),
)
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
('windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [], dict(drive_name='C', path=r'Documents and Settings\spam')),
('special', r'/special_chars/+%5C$*/', [r'+\$*'], {}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('insensitive', '/CaseInsensitive/fred', ['fred'], {}),
('test', '/test/1', [], {}),
('test2', '/test/2', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Regression for #9038
# These views are resolved by method name. Each method is deployed twice -
# once with an explicit argument, and once using the default value on
# the method. This is potentially ambiguous, as you have to pick the
# correct view for the arguments provided.
('kwargs_view', '/arg_view/', [], {}),
('kwargs_view', '/arg_view/10/', [], {'arg1':10}),
('regressiontests.urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/', [], {}),
('regressiontests.urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/10/', [], {'arg1':10}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
)
class NoURLPatternsTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.no_urls'
def test_no_urls_exception(self):
"""
RegexURLResolver should raise an exception when no urlpatterns exist.
"""
resolver = RegexURLResolver(r'^$', self.urls)
self.assertRaisesMessage(ImproperlyConfigured,
"The included urlconf regressiontests.urlpatterns_reverse.no_urls "\
"doesn't have any patterns in it", getattr, resolver, 'url_patterns')
class URLPatternReverse(TestCase):
urls = 'regressiontests.urlpatterns_reverse.urls'
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch, e:
self.assertEqual(expected, NoReverseMatch)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
self.assertRaises(NoReverseMatch, reverse, None)
class ResolverTests(unittest.TestCase):
def test_non_regex(self):
"""
Verifies that we raise a Resolver404 if what we are resolving doesn't
meet the basic requirements of a path to match - i.e., at the very
least, it matches the root pattern '^/'. We must never return None
from resolve, or we will get a TypeError further down the line.
Regression for #10834.
"""
self.assertRaises(Resolver404, resolve, '')
self.assertRaises(Resolver404, resolve, 'a')
self.assertRaises(Resolver404, resolve, '\\')
self.assertRaises(Resolver404, resolve, '.')
def test_404_tried_urls_have_names(self):
"""
Verifies that the list of URLs that come back from a Resolver404
exception contains a list in the right format for printing out in
the DEBUG 404 page with both the patterns and URL names, if available.
"""
urls = 'regressiontests.urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a non-existent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/non-existent-url')
url_types_names = [
[{'type': RegexURLPattern, 'name': 'named-url1'}],
[{'type': RegexURLPattern, 'name': 'named-url2'}],
[{'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLResolver}],
]
try:
resolve('/included/non-existent-url', urlconf=urls)
self.fail('resolve did not raise a 404')
except Resolver404, e:
# make sure we at least matched the root ('/') url resolver:
self.assertTrue('tried' in e.args[0])
tried = e.args[0]['tried']
self.assertEqual(len(e.args[0]['tried']), len(url_types_names), 'Wrong number of tried URLs returned. Expected %s, got %s.' % (len(url_types_names), len(e.args[0]['tried'])))
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
self.assertTrue(isinstance(t, e['type']), '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertTrue(t.name is None, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(t.name, e['name'], 'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name))
class ReverseLazyTest(TestCase):
urls = 'regressiontests.urlpatterns_reverse.reverse_lazy_urls'
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=301)
def test_user_permission_with_lazy_reverse(self):
user = User.objects.create_user('alfred', '[email protected]', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.login(username='alfred', password='testpw')
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
class ReverseShortcutTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.urls'
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj(object):
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertTrue(isinstance(res, HttpResponseRedirect))
self.assertEqual(res['Location'], '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertTrue(isinstance(res, HttpResponsePermanentRedirect))
self.assertEqual(res['Location'], '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res['Location'], '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res['Location'], '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res['Location'], '/headlines/2008.02.17/')
self.assertRaises(NoReverseMatch, redirect, 'not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res['Location'], '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res['Location'], 'http://example.com/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res['Location'], '/absolute_arg_view/')
self.assertRaises(NoReverseMatch, redirect, absolute_kwargs_view, wrong_argument=None)
class NamespaceTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.namespace_urls'
def test_ambiguous_object(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', args=[37,42])
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', kwargs={'arg1':42, 'arg2':37})
def test_ambiguous_urlpattern(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing')
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', args=[37,42])
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', kwargs={'arg1':42, 'arg2':37})
def test_non_existent_namespace(self):
"Non-existent namespaces raise errors"
self.assertRaises(NoReverseMatch, reverse, 'blahblah:urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'test-ns1:blahblah:urlobject-view')
def test_normal_name(self):
"Normal lookups work as expected"
self.assertEqual('/normal/', reverse('normal-view'))
self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37,42]))
self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/+%5C$*/', reverse('special-view'))
def test_simple_included_name(self):
"Normal lookups work on names included from other patterns"
self.assertEqual('/included/normal/', reverse('inc-normal-view'))
self.assertEqual('/included/normal/37/42/', reverse('inc-normal-view', args=[37,42]))
self.assertEqual('/included/normal/42/37/', reverse('inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/included/+%5C$*/', reverse('inc-special-view'))
def test_namespace_object(self):
"Dynamic URL objects can be found using a namespace"
self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37,42]))
self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))
def test_embedded_namespace_object(self):
"Namespaces can be installed anywhere in the URL pattern tree"
self.assertEqual('/included/test3/inner/', reverse('test-ns3:urlobject-view'))
self.assertEqual('/included/test3/inner/37/42/', reverse('test-ns3:urlobject-view', args=[37,42]))
self.assertEqual('/included/test3/inner/42/37/', reverse('test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('test-ns3:urlobject-special-view'))
def test_namespace_pattern(self):
"Namespaces can be applied to include()'d urlpatterns"
self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37,42]))
self.assertEqual('/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))
    def test_namespace_pattern_with_variable_prefix(self):
        "Namespaced include()s still work when a regex variable sits in front of them"
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer':42}))
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', kwargs={'outer':42, 'arg1': 37, 'arg2': 4}))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer':42}))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))
def test_multiple_namespace_pattern(self):
"Namespaces can be embedded"
self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37,42]))
self.assertEqual('/ns-included1/test3/inner/42/37/', reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))
def test_nested_namespace_pattern(self):
"Namespaces can be nested"
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/37/42/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37,42]))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/42/37/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view'))
def test_app_lookup_object(self):
"A default application namespace can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37,42]))
self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))
    def test_app_lookup_object_with_default(self):
        "A default application namespace that is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/included/test3/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
self.assertEqual('/included/test3/inner/37/42/', reverse('testapp:urlobject-view', args=[37,42], current_app='test-ns3'))
self.assertEqual('/included/test3/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1':42, 'arg2':37}, current_app='test-ns3'))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3'))
    def test_app_lookup_object_without_default(self):
        "An application namespace without a default that is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37,42]))
self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
self.assertEqual('/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37,42], current_app='other-ns1'))
self.assertEqual('/other1/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1':42, 'arg2':37}, current_app='other-ns1'))
self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))
def test_special_chars_namespace(self):
self.assertEqual('/+%5C$*/included/normal/', reverse('special:inc-normal-view'))
self.assertEqual('/+%5C$*/included/normal/37/42/', reverse('special:inc-normal-view', args=[37,42]))
self.assertEqual('/+%5C$*/included/normal/42/37/', reverse('special:inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:inc-special-view'))
def test_namespaces_with_variables(self):
"Namespace prefixes can capture variables: see #15900"
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer':'78', 'extra':'foobar'}))
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78','foobar']))
class RequestURLconfTests(TestCase):
def setUp(self):
self.root_urlconf = settings.ROOT_URLCONF
self.middleware_classes = settings.MIDDLEWARE_CLASSES
settings.ROOT_URLCONF = urlconf_outer.__name__
def tearDown(self):
settings.ROOT_URLCONF = self.root_urlconf
settings.MIDDLEWARE_CLASSES = self.middleware_classes
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'outer:/test/me/,'
'inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
def test_urlconf_overridden(self):
settings.MIDDLEWARE_CLASSES += (
'%s.ChangeURLconfMiddleware' % middleware.__name__,
)
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'outer:,inner:/second_test/')
def test_urlconf_overridden_with_null(self):
settings.MIDDLEWARE_CLASSES += (
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
)
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
class ErrorHandlerResolutionTests(TestCase):
"""Tests for handler404 and handler500"""
def setUp(self):
from django.core.urlresolvers import RegexURLResolver
urlconf = 'regressiontests.urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'regressiontests.urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = RegexURLResolver(r'^$', urlconf)
self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)
def test_named_handlers(self):
from .views import empty_view
handler = (empty_view, {})
self.assertEqual(self.resolver.resolve404(), handler)
self.assertEqual(self.resolver.resolve500(), handler)
def test_callable_handers(self):
from .views import empty_view
handler = (empty_view, {})
self.assertEqual(self.callable_resolver.resolve404(), handler)
self.assertEqual(self.callable_resolver.resolve500(), handler)
class DefaultErrorHandlerTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.urls_without_full_import'
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
try:
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 404 handler")
try:
self.assertRaises(ValueError, self.client.get, '/bad_view/')
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 500 handler")
class NoRootUrlConfTests(TestCase):
"""Tests for handler404 and handler500 if urlconf is None"""
urls = None
def test_no_handler_exception(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
class ResolverMatchTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.namespace_urls'
def test_urlpattern_resolve(self):
for path, name, app_name, namespace, func, args, kwargs in resolve_test_data:
# Test legacy support for extracting "function, args, kwargs"
match_func, match_args, match_kwargs = resolve(path)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# Test ResolverMatch capabilities.
match = resolve(path)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, name)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.func, func)
# ... and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
class ErroneousViewTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.erroneous_urls'
def test_erroneous_resolve(self):
self.assertRaises(ImportError, self.client.get, '/erroneous_inner/')
self.assertRaises(ImportError, self.client.get, '/erroneous_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_inner/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable/')
| bsd-3-clause |
jaloren/robotframework | src/robot/variables/isvar.py | 6 | 1651 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from robot.utils import is_string
from .splitter import VariableIterator
def is_var(string, identifiers='$@&'):
if not string or not is_string(string) or len(string) < 4:
return False
if string[0] not in identifiers or string[1] != '{' or string[-1] != '}':
return False
body = string[2:-1]
return '{' not in body and '}' not in body
def is_scalar_var(string):
return is_var(string, identifiers='$')
def is_list_var(string):
return is_var(string, identifiers='@')
def is_dict_var(string):
return is_var(string, identifiers='&')
def contains_var(string, identifiers='$@&'):
return (is_string(string) and
any(i in string for i in identifiers) and
'{' in string and '}' in string and
bool(VariableIterator(string, identifiers)))
def validate_var(string, identifiers='$@&'):
if not is_var(string, identifiers):
raise DataError("Invalid variable name '%s'." % string)
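# Hedged examples (assumption, not part of the original module):
#     is_var('${foo}')                 -> True
#     is_scalar_var('@{items}')        -> False
#     is_list_var('@{items}')          -> True
#     is_dict_var('&{mapping}')        -> True
#     contains_var('Hello, ${name}!')  -> True
#     validate_var('foo')              -> raises DataError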
| apache-2.0 |
SeldonIO/seldon-server | python/seldon/cli/zk_utils.py | 2 | 3835 | import json
from os import walk
import os
import sys
import errno
import kazoo.exceptions  # NoNodeError is raised by the kazoo client and handled in get_all_nodes_list()
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def is_json_data(data):
if (data != None) and (len(data)>0):
return data[0] == '{' or data[0] == '['
else:
return False
def push_all_nodes(zk_client,zkroot):
for (dirpath, dirnames, filenames) in walk(zkroot):
for filename in filenames:
file_path = dirpath + "/" + filename
f = open(file_path)
data = f.read()
f.close()
node_path = file_path.replace(zkroot,"").replace("/_data_","")
node_set(zk_client,node_path,data)
def get_all_nodes_list(zk_client, start_node, all_nodes_list):
#print "processing: {}".format(start_node)
try:
children = zk_client.get_children(start_node)
for child in children:
child = str(child)
node_path = start_node+"/"+child if start_node != '/' else "/"+child
all_nodes_list.add(node_path)
get_all_nodes_list(zk_client, node_path, all_nodes_list)
except kazoo.exceptions.NoNodeError:
pass
def write_data_to_file(data_fpath, data):
json = dict_to_json(data, True) if isinstance(data,dict) else str(data)
mkdir_p(os.path.dirname(data_fpath))
f = open(data_fpath,'w')
f.write(json)
f.write('\n')
f.close()
print "Writing data to file[{data_fpath}]".format(**locals())
def dict_to_json(d, expand=False):
return json.dumps(d, sort_keys=True, indent=4, separators=(',', ': ')) if expand else json.dumps(d, sort_keys=True, separators=(',',':'))
def json_to_dict(json_data):
return json.loads(json_data)
def pull_all_nodes(zk_client,zkroot):
all_nodes_list = set()
nodes = ["/config","/all_clients"]
for node in nodes:
start_node = node
get_all_nodes_list(zk_client, start_node, all_nodes_list)
all_nodes_list = list(all_nodes_list)
for node_path in all_nodes_list:
if node_path == "/config/topics" or node_path == "/config/clients" or node_path == "/config/changes" or node_path == "/config/users":
print "Ignoring kafka data node ",node_path
else:
print "trying to sync ",node_path
node_value = node_get(zk_client,node_path)
if not node_value is None:
node_value = node_value.strip()
if is_json_data(node_value):
data = json_to_dict(node_value) if node_value != None and len(node_value)>0 else ""
else:
data = str(node_value)
data_fpath = zkroot + node_path + "/_data_"
write_data_to_file(data_fpath, data)
def json_compress(json_data):
d = json.loads(json_data)
return json.dumps(d, sort_keys=True, separators=(',',':'))
def node_set(zk_client, node_path, node_value):
if is_json_data(node_value):
node_value = json_compress(node_value)
node_value = node_value.strip() if node_value != None else node_value
if zk_client.exists(node_path):
retVal = zk_client.set(node_path,node_value)
else:
retVal = zk_client.create(node_path,node_value,makepath=True)
print "updated zk node[{node_path}]".format(node_path=node_path)
def node_get(zk_client, node_path):
theValue = None
if zk_client.exists(node_path):
theValue = zk_client.get(node_path)
theValue = theValue[0]
return theValue.strip() if theValue != None else theValue
def node_delete(zk_client, node_path):
if zk_client.exists(node_path):
retVal = zk_client.delete(node_path)
print "deleted zk node[{node_path}]".format(node_path=node_path)
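# Hedged usage sketch (assumption, not part of the original module): with a
# connected kazoo client the sync helpers are typically driven as
#     pull_all_nodes(zk_client, zkroot)   # ZooKeeper -> <zkroot>/<path>/_data_
#     push_all_nodes(zk_client, zkroot)   # local _data_ files -> ZooKeeper
# node_set()/node_get()/node_delete() operate on single nodes, and JSON
# payloads are compacted with json_compress() before being written.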
| apache-2.0 |
dougwig/x-neutron-lbaas | neutron_lbaas/openstack/common/cache/backends.py | 76 | 7793 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
NOTSET = object()
@six.add_metaclass(abc.ABCMeta)
class BaseCache(object):
"""Base Cache Abstraction
:params parsed_url: Parsed url object.
:params options: A dictionary with configuration parameters
for the cache. For example:
- default_ttl: An integer defining the default ttl for keys.
"""
def __init__(self, parsed_url, options=None):
self._parsed_url = parsed_url
self._options = options or {}
self._default_ttl = int(self._options.get('default_ttl', 0))
@abc.abstractmethod
def _set(self, key, value, ttl, not_exists=False):
"""Implementations of this class have to override this method."""
def set(self, key, value, ttl, not_exists=False):
"""Sets or updates a cache entry
.. note:: Thread-safety is required and has to be guaranteed by the
backend implementation.
:params key: Item key as string.
:type key: `unicode string`
:params value: Value to assign to the key. This can be anything that
is handled by the current backend.
:params ttl: Key's timeout in seconds. 0 means no timeout.
:type ttl: int
:params not_exists: If True, the key will be set if it doesn't exist.
Otherwise, it'll always be set.
:type not_exists: bool
:returns: True if the operation succeeds, False otherwise.
"""
if ttl is None:
ttl = self._default_ttl
return self._set(key, value, ttl, not_exists)
def __setitem__(self, key, value):
self.set(key, value, self._default_ttl)
def setdefault(self, key, value):
"""Sets the key value to `value` if it doesn't exist
:params key: Item key as string.
:type key: `unicode string`
:params value: Value to assign to the key. This can be anything that
is handled by the current backend.
"""
try:
return self[key]
except KeyError:
self[key] = value
return value
@abc.abstractmethod
def _get(self, key, default):
"""Implementations of this class have to override this method."""
def get(self, key, default=None):
"""Gets one item from the cache
.. note:: Thread-safety is required and it has to be guaranteed
by the backend implementation.
:params key: Key for the item to retrieve from the cache.
:params default: The default value to return.
:returns: `key`'s value in the cache if it exists, otherwise
`default` should be returned.
"""
return self._get(key, default)
def __getitem__(self, key):
value = self.get(key, NOTSET)
if value is NOTSET:
raise KeyError
return value
@abc.abstractmethod
def __delitem__(self, key):
"""Removes an item from cache.
.. note:: Thread-safety is required and it has to be guaranteed by
the backend implementation.
:params key: The key to remove.
:returns: The key value if there's one
"""
@abc.abstractmethod
def _clear(self):
"""Implementations of this class have to override this method."""
def clear(self):
"""Removes all items from the cache.
.. note:: Thread-safety is required and it has to be guaranteed by
the backend implementation.
"""
return self._clear()
@abc.abstractmethod
def _incr(self, key, delta):
"""Implementations of this class have to override this method."""
def incr(self, key, delta=1):
"""Increments the value for a key
:params key: The key for the value to be incremented
:params delta: Number of units by which to increment the value.
Pass a negative number to decrement the value.
:returns: The new value
"""
return self._incr(key, delta)
@abc.abstractmethod
def _append_tail(self, key, tail):
"""Implementations of this class have to override this method."""
def append_tail(self, key, tail):
"""Appends `tail` to `key`'s value.
:params key: The key of the value to which `tail` should be appended.
:params tail: The list of values to append to the original.
:returns: The new value
"""
if not hasattr(tail, "__iter__"):
raise TypeError('Tail must be an iterable')
if not isinstance(tail, list):
# NOTE(flaper87): Make sure we pass a list
# down to the implementation. Not all drivers
# have support for generators, sets or other
# iterables.
tail = list(tail)
return self._append_tail(key, tail)
def append(self, key, value):
"""Appends `value` to `key`'s value.
:params key: The key of the value to which `tail` should be appended.
:params value: The value to append to the original.
:returns: The new value
"""
return self.append_tail(key, [value])
@abc.abstractmethod
def __contains__(self, key):
"""Verifies that a key exists.
:params key: The key to verify.
:returns: True if the key exists, otherwise False.
"""
@abc.abstractmethod
def _get_many(self, keys, default):
"""Implementations of this class have to override this method."""
return ((k, self.get(k, default=default)) for k in keys)
def get_many(self, keys, default=NOTSET):
"""Gets keys' value from cache
:params keys: List of keys to retrieve.
:params default: The default value to return for each key that is not
in the cache.
:returns: A generator of (key, value)
"""
return self._get_many(keys, default)
@abc.abstractmethod
def _set_many(self, data, ttl):
"""Implementations of this class have to override this method."""
for key, value in data.items():
self.set(key, value, ttl=ttl)
def set_many(self, data, ttl=None):
"""Puts several items into the cache at once
Depending on the backend, this operation may or may not be efficient.
The default implementation calls set for each (key, value) pair
passed, other backends support set_many operations as part of their
protocols.
:params data: A dictionary like {key: val} to store in the cache.
:params ttl: Key's timeout in seconds.
"""
if ttl is None:
ttl = self._default_ttl
self._set_many(data, ttl)
def update(self, **kwargs):
"""Sets several (key, value) paris.
Refer to the `set_many` docstring.
"""
self.set_many(kwargs, ttl=self._default_ttl)
@abc.abstractmethod
def _unset_many(self, keys):
"""Implementations of this class have to override this method."""
for key in keys:
del self[key]
def unset_many(self, keys):
"""Removes several keys from the cache at once
:params keys: List of keys to unset.
"""
self._unset_many(keys)
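# Hedged usage sketch (assumption, not part of the original module): a
# concrete backend subclasses BaseCache and fills in the abstract hooks
# (_set, _get, _clear, _incr, _append_tail, __delitem__, __contains__, ...);
# callers then use the dict-like surface, e.g. with a hypothetical
# MemoryCache backend:
#     cache = MemoryCache(parsed_url, options={'default_ttl': 60})
#     cache['token'] = 'abc'            # __setitem__ -> set() with default ttl
#     cache.setdefault('token', 'xyz')  # returns 'abc'; key already present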
| apache-2.0 |
mats116/gae-boilerplate | bp_includes/external/requests/packages/urllib3/poolmanager.py | 93 | 8976 | # urllib3/poolmanager.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .request import RequestMethods
from .util import parse_url
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version')
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example: ::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = pool_classes_by_scheme[scheme]
kwargs = self.connection_pool_kw
if scheme == 'http':
kwargs = self.connection_pool_kw.copy()
for kw in SSL_KEYWORDS:
kwargs.pop(kw, None)
return pool_cls(host, port, **kwargs)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port)
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 2616, Section 10.3.4
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown
kw['redirect'] = redirect
return self.urlopen(method, redirect_location, **kw)
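# Hedged note (assumption, not part of the original module): pools are keyed
# by (scheme, host, port), so requests to the same origin reuse a single
# ConnectionPool, e.g.
#     pm = PoolManager(num_pools=5)
#     a = pm.connection_from_url('http://example.com/a')
#     b = pm.connection_from_url('http://example.com/b')
#     assert a is b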
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
    :param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
assert self.proxy.scheme in ("http", "https"), \
'Not supported proxy scheme %s' % self.proxy.scheme
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
self.headers))
return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
| lgpl-3.0 |
qtproject/pyside-pyside | tests/QtCore/bug_332.py | 1 | 1615 | #!/usr/bin/python
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import unittest
from PySide2 import QtCore
class Lock(QtCore.QMutex):
def tryLock(self,timeoutt=10):
return QtCore.QMutex.tryLock(self,timeoutt)
class TestBug(unittest.TestCase):
def testCase(self):
l = Lock()
l.tryLock() # this cause a assertion
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
blueyed/pip | pip/_vendor/distlib/resources.py | 191 | 9432 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import bisect
import io
import logging
import os
import pkgutil
import shutil
import sys
import types
import zipimport
from . import DistlibException
from .util import cached_property, get_cache_base, path_to_cache_dir, Cache
logger = logging.getLogger(__name__)
cache = None # created when needed
class ResourceCache(Cache):
def __init__(self, base=None):
if base is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('resource-cache'))
super(ResourceCache, self).__init__(base)
def is_stale(self, resource, path):
"""
Is the cache stale for the given resource?
:param resource: The :class:`Resource` being cached.
:param path: The path of the resource in the cache.
:return: True if the cache is stale.
"""
# Cache invalidation is a hard problem :-)
return True
def get(self, resource):
"""
Get a resource into the cache,
:param resource: A :class:`Resource` instance.
:return: The pathname of the resource in the cache.
"""
prefix, path = resource.finder.get_cache_info(resource)
if prefix is None:
result = path
else:
result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
dirname = os.path.dirname(result)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if not os.path.exists(result):
stale = True
else:
stale = self.is_stale(resource, path)
if stale:
# write the bytes of the resource to the cache location
with open(result, 'wb') as f:
f.write(resource.bytes)
return result
class ResourceBase(object):
def __init__(self, finder, name):
self.finder = finder
self.name = name
class Resource(ResourceBase):
"""
A class representing an in-package resource, such as a data file. This is
not normally instantiated by user code, but rather by a
:class:`ResourceFinder` which manages the resource.
"""
is_container = False # Backwards compatibility
def as_stream(self):
"""
Get the resource as a stream.
This is not a property to make it obvious that it returns a new stream
each time.
"""
return self.finder.get_stream(self)
@cached_property
def file_path(self):
global cache
if cache is None:
cache = ResourceCache()
return cache.get(self)
@cached_property
def bytes(self):
return self.finder.get_bytes(self)
@cached_property
def size(self):
return self.finder.get_size(self)
class ResourceContainer(ResourceBase):
is_container = True # Backwards compatibility
@cached_property
def resources(self):
return self.finder.get_resources(self)
class ResourceFinder(object):
"""
Resource finder for file system resources.
"""
def __init__(self, module):
self.module = module
self.loader = getattr(module, '__loader__', None)
self.base = os.path.dirname(getattr(module, '__file__', ''))
def _adjust_path(self, path):
return os.path.realpath(path)
def _make_path(self, resource_name):
parts = resource_name.split('/')
parts.insert(0, self.base)
result = os.path.join(*parts)
return self._adjust_path(result)
def _find(self, path):
return os.path.exists(path)
def get_cache_info(self, resource):
return None, resource.path
def find(self, resource_name):
path = self._make_path(resource_name)
if not self._find(path):
result = None
else:
if self._is_directory(path):
result = ResourceContainer(self, resource_name)
else:
result = Resource(self, resource_name)
result.path = path
return result
def get_stream(self, resource):
return open(resource.path, 'rb')
def get_bytes(self, resource):
with open(resource.path, 'rb') as f:
return f.read()
def get_size(self, resource):
return os.path.getsize(resource.path)
def get_resources(self, resource):
def allowed(f):
return f != '__pycache__' and not f.endswith(('.pyc', '.pyo'))
return set([f for f in os.listdir(resource.path) if allowed(f)])
def is_container(self, resource):
return self._is_directory(resource.path)
_is_directory = staticmethod(os.path.isdir)
class ZipResourceFinder(ResourceFinder):
"""
Resource finder for resources in .zip files.
"""
def __init__(self, module):
super(ZipResourceFinder, self).__init__(module)
archive = self.loader.archive
self.prefix_len = 1 + len(archive)
# PyPy doesn't have a _files attr on zipimporter, and you can't set one
if hasattr(self.loader, '_files'):
self._files = self.loader._files
else:
self._files = zipimport._zip_directory_cache[archive]
self.index = sorted(self._files)
def _adjust_path(self, path):
return path
def _find(self, path):
path = path[self.prefix_len:]
if path in self._files:
result = True
else:
if path and path[-1] != os.sep:
path = path + os.sep
i = bisect.bisect(self.index, path)
try:
result = self.index[i].startswith(path)
except IndexError:
result = False
if not result:
logger.debug('_find failed: %r %r', path, self.loader.prefix)
else:
logger.debug('_find worked: %r %r', path, self.loader.prefix)
return result
def get_cache_info(self, resource):
prefix = self.loader.archive
path = resource.path[1 + len(prefix):]
return prefix, path
def get_bytes(self, resource):
return self.loader.get_data(resource.path)
def get_stream(self, resource):
return io.BytesIO(self.get_bytes(resource))
def get_size(self, resource):
path = resource.path[self.prefix_len:]
return self._files[path][3]
def get_resources(self, resource):
path = resource.path[self.prefix_len:]
if path and path[-1] != os.sep:
path += os.sep
plen = len(path)
result = set()
i = bisect.bisect(self.index, path)
while i < len(self.index):
if not self.index[i].startswith(path):
break
s = self.index[i][plen:]
result.add(s.split(os.sep, 1)[0]) # only immediate children
i += 1
return result
def _is_directory(self, path):
path = path[self.prefix_len:]
if path and path[-1] != os.sep:
path += os.sep
i = bisect.bisect(self.index, path)
try:
result = self.index[i].startswith(path)
except IndexError:
result = False
return result
_finder_registry = {
type(None): ResourceFinder,
zipimport.zipimporter: ZipResourceFinder
}
try:
import _frozen_importlib
_finder_registry[_frozen_importlib.SourceFileLoader] = ResourceFinder
_finder_registry[_frozen_importlib.FileFinder] = ResourceFinder
except (ImportError, AttributeError):
pass
def register_finder(loader, finder_maker):
_finder_registry[type(loader)] = finder_maker
_finder_cache = {}
def finder(package):
"""
Return a resource finder for a package.
:param package: The name of the package.
:return: A :class:`ResourceFinder` instance for the package.
"""
if package in _finder_cache:
result = _finder_cache[package]
else:
if package not in sys.modules:
__import__(package)
module = sys.modules[package]
path = getattr(module, '__path__', None)
if path is None:
raise DistlibException('You cannot get a finder for a module, '
'only for a package')
loader = getattr(module, '__loader__', None)
finder_maker = _finder_registry.get(type(loader))
if finder_maker is None:
raise DistlibException('Unable to locate finder for %r' % package)
result = finder_maker(module)
_finder_cache[package] = result
return result
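# A minimal usage sketch of the finder API above (the package name
# 'mypackage' and resource name 'data/config.json' are hypothetical, not part
# of distlib itself); within pip's vendored copy the import would be
# ``from pip._vendor.distlib.resources import finder``:
#
#     from distlib.resources import finder
#
#     f = finder('mypackage')                     # ResourceFinder or ZipResourceFinder
#     r = f.find('data/config.json')              # Resource, ResourceContainer or None
#     if r is not None and not r.is_container:
#         data = r.bytes                          # raw bytes of the resource
#         stream = r.as_stream()                  # a new binary stream on each call
#         first_line = stream.readline()
#         stream.close()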
_dummy_module = types.ModuleType(str('__dummy__'))
def finder_for_path(path):
"""
Return a resource finder for a path, which should represent a container.
:param path: The path.
:return: A :class:`ResourceFinder` instance for the path.
"""
result = None
# calls any path hooks, gets importer into cache
pkgutil.get_importer(path)
loader = sys.path_importer_cache.get(path)
finder = _finder_registry.get(type(loader))
if finder:
module = _dummy_module
module.__file__ = os.path.join(path, '')
module.__loader__ = loader
result = finder(module)
return result
| mit |
ArcherCraftStore/ArcherVMPeridot | Python/Lib/test/test_file_eintr.py | 122 | 10331 | # Written to test interrupted system calls interfering with our many buffered
# IO implementations. http://bugs.python.org/issue12268
#
# It was suggested that this code could be merged into test_io and the tests
# made to work using the same method as the existing signal tests in test_io.
# I was unable to get single process tests using alarm or setitimer that way
# to reproduce the EINTR problems. This process based test suite reproduces
# the problems prior to the issue12268 patch reliably on Linux and OSX.
# - gregory.p.smith
import os
import select
import signal
import subprocess
import sys
from test.support import run_unittest
import time
import unittest
# Test import all of the things we're about to try testing up front.
from _io import FileIO
@unittest.skipUnless(os.name == 'posix', 'tests require a posix system.')
class TestFileIOSignalInterrupt(unittest.TestCase):
def setUp(self):
self._process = None
def tearDown(self):
if self._process and self._process.poll() is None:
try:
self._process.kill()
except OSError:
pass
def _generate_infile_setup_code(self):
"""Returns the infile = ... line of code for the reader process.
        subclasses should override this to test different IO objects.
"""
return ('import _io ;'
'infile = _io.FileIO(sys.stdin.fileno(), "rb")')
def fail_with_process_info(self, why, stdout=b'', stderr=b'',
communicate=True):
"""A common way to cleanup and fail with useful debug output.
Kills the process if it is still running, collects remaining output
and fails the test with an error message including the output.
Args:
why: Text to go after "Error from IO process" in the message.
stdout, stderr: standard output and error from the process so
far to include in the error message.
communicate: bool, when True we call communicate() on the process
after killing it to gather additional output.
"""
if self._process.poll() is None:
time.sleep(0.1) # give it time to finish printing the error.
try:
self._process.terminate() # Ensure it dies.
except OSError:
pass
if communicate:
stdout_end, stderr_end = self._process.communicate()
stdout += stdout_end
stderr += stderr_end
self.fail('Error from IO process %s:\nSTDOUT:\n%sSTDERR:\n%s\n' %
(why, stdout.decode(), stderr.decode()))
def _test_reading(self, data_to_write, read_and_verify_code):
"""Generic buffered read method test harness to validate EINTR behavior.
Also validates that Python signal handlers are run during the read.
Args:
data_to_write: String to write to the child process for reading
before sending it a signal, confirming the signal was handled,
writing a final newline and closing the infile pipe.
read_and_verify_code: Single "line" of code to read from a file
object named 'infile' and validate the result. This will be
executed as part of a python subprocess fed data_to_write.
"""
infile_setup_code = self._generate_infile_setup_code()
# Total pipe IO in this function is smaller than the minimum posix OS
# pipe buffer size of 512 bytes. No writer should block.
assert len(data_to_write) < 512, 'data_to_write must fit in pipe buf.'
# Start a subprocess to call our read method while handling a signal.
self._process = subprocess.Popen(
[sys.executable, '-u', '-c',
'import signal, sys ;'
'signal.signal(signal.SIGINT, '
'lambda s, f: sys.stderr.write("$\\n")) ;'
+ infile_setup_code + ' ;' +
'sys.stderr.write("Worm Sign!\\n") ;'
+ read_and_verify_code + ' ;' +
'infile.close()'
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the signal handler to be installed.
worm_sign = self._process.stderr.read(len(b'Worm Sign!\n'))
if worm_sign != b'Worm Sign!\n': # See also, Dune by Frank Herbert.
self.fail_with_process_info('while awaiting a sign',
stderr=worm_sign)
self._process.stdin.write(data_to_write)
signals_sent = 0
rlist = []
# We don't know when the read_and_verify_code in our child is actually
# executing within the read system call we want to interrupt. This
# loop waits for a bit before sending the first signal to increase
# the likelihood of that. Implementations without correct EINTR
# and signal handling usually fail this test.
while not rlist:
rlist, _, _ = select.select([self._process.stderr], (), (), 0.05)
self._process.send_signal(signal.SIGINT)
signals_sent += 1
if signals_sent > 200:
self._process.kill()
self.fail('reader process failed to handle our signals.')
# This assumes anything unexpected that writes to stderr will also
# write a newline. That is true of the traceback printing code.
signal_line = self._process.stderr.readline()
if signal_line != b'$\n':
self.fail_with_process_info('while awaiting signal',
stderr=signal_line)
# We append a newline to our input so that a readline call can
# end on its own before the EOF is seen and so that we're testing
# the read call that was interrupted by a signal before the end of
# the data stream has been reached.
stdout, stderr = self._process.communicate(input=b'\n')
if self._process.returncode:
self.fail_with_process_info(
'exited rc=%d' % self._process.returncode,
stdout, stderr, communicate=False)
# PASS!
# String format for the read_and_verify_code used by read methods.
_READING_CODE_TEMPLATE = (
'got = infile.{read_method_name}() ;'
'expected = {expected!r} ;'
'assert got == expected, ('
'"{read_method_name} returned wrong data.\\n"'
'"got data %r\\nexpected %r" % (got, expected))'
)
def test_readline(self):
"""readline() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello, world!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='readline',
expected=b'hello, world!\n'))
def test_readlines(self):
"""readlines() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='readlines',
expected=[b'hello\n', b'world!\n']))
def test_readall(self):
"""readall() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='readall',
expected=b'hello\nworld!\n'))
# read() is the same thing as readall().
self._test_reading(
data_to_write=b'hello\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='read',
expected=b'hello\nworld!\n'))
class TestBufferedIOSignalInterrupt(TestFileIOSignalInterrupt):
def _generate_infile_setup_code(self):
"""Returns the infile = ... line of code to make a BufferedReader."""
return ('infile = open(sys.stdin.fileno(), "rb") ;'
'import _io ;assert isinstance(infile, _io.BufferedReader)')
def test_readall(self):
"""BufferedReader.read() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='read',
expected=b'hello\nworld!\n'))
class TestTextIOSignalInterrupt(TestFileIOSignalInterrupt):
def _generate_infile_setup_code(self):
"""Returns the infile = ... line of code to make a TextIOWrapper."""
return ('infile = open(sys.stdin.fileno(), "rt", newline=None) ;'
'import _io ;assert isinstance(infile, _io.TextIOWrapper)')
def test_readline(self):
"""readline() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello, world!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='readline',
expected='hello, world!\n'))
def test_readlines(self):
"""readlines() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello\r\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='readlines',
expected=['hello\n', 'world!\n']))
def test_readall(self):
"""read() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='read',
expected="hello\nworld!\n"))
def test_main():
test_cases = [
tc for tc in globals().values()
if isinstance(tc, type) and issubclass(tc, unittest.TestCase)]
run_unittest(*test_cases)
if __name__ == '__main__':
test_main()
| apache-2.0 |
alistairlow/tensorflow | tensorflow/contrib/keras/api/keras/optimizers/__init__.py | 73 | 1696 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Optimizer classes.
from tensorflow.python.keras._impl.keras.optimizers import Adadelta
from tensorflow.python.keras._impl.keras.optimizers import Adagrad
from tensorflow.python.keras._impl.keras.optimizers import Adam
from tensorflow.python.keras._impl.keras.optimizers import Adamax
from tensorflow.python.keras._impl.keras.optimizers import Nadam
from tensorflow.python.keras._impl.keras.optimizers import Optimizer
from tensorflow.python.keras._impl.keras.optimizers import RMSprop
from tensorflow.python.keras._impl.keras.optimizers import SGD
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras._impl.keras.optimizers import deserialize
from tensorflow.python.keras._impl.keras.optimizers import serialize
from tensorflow.python.keras._impl.keras.optimizers import get
del absolute_import
del division
del print_function
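# A minimal usage sketch of the re-exported API (the parameter values and the
# commented model.compile() call are illustrative only):
#
#     from tensorflow.contrib.keras.api.keras import optimizers
#
#     opt = optimizers.SGD(lr=0.01, momentum=0.9)   # build an optimizer directly
#     adam = optimizers.get('adam')                 # or resolve one by name
#     config = optimizers.serialize(opt)            # -> dict describing the optimizer
#     clone = optimizers.deserialize(config)        # rebuild it from that dict
#     # model.compile(optimizer=opt, loss='mse')    # typical place it is consumed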
| apache-2.0 |
andmos/ansible | lib/ansible/modules/notification/rabbitmq_publish.py | 49 | 5482 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, John Imison <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_publish
short_description: Publish a message to a RabbitMQ queue.
version_added: "2.8"
description:
- Publish a message on a RabbitMQ queue using a blocking connection.
options:
url:
description:
      - A URL connection string to connect to the RabbitMQ server.
      - I(url) and I(host)/I(port)/I(user)/I(pass)/I(vhost) are mutually exclusive; use one or the other, but not both.
proto:
description:
- The protocol to use.
choices: [amqps, amqp]
host:
description:
- The RabbitMQ server hostname or IP.
port:
description:
- The RabbitMQ server port.
username:
description:
- The RabbitMQ username.
password:
description:
- The RabbitMQ password.
vhost:
description:
- The virtual host to target.
- If default vhost is required, use C('%2F').
queue:
description:
- The queue to publish a message to. If no queue is specified, RabbitMQ will return a random queue name.
exchange:
description:
- The exchange to publish a message to.
routing_key:
description:
- The routing key.
body:
description:
- The body of the message.
- A C(body) cannot be provided if a C(src) is specified.
src:
description:
- A file to upload to the queue. Automatic mime type detection is attempted if content_type is not defined (left as default).
- A C(src) cannot be provided if a C(body) is specified.
- The filename is added to the headers of the posted message to RabbitMQ. Key being the C(filename), value is the filename.
aliases: ['file']
content_type:
description:
- The content type of the body.
default: text/plain
durable:
description:
- Set the queue to be durable.
default: False
type: bool
exclusive:
description:
- Set the queue to be exclusive.
default: False
type: bool
auto_delete:
description:
- Set the queue to auto delete.
default: False
type: bool
headers:
description:
- A dictionary of headers to post with the message.
default: {}
type: dict
requirements: [ pika ]
notes:
- This module requires the pika python library U(https://pika.readthedocs.io/).
- Pika is a pure-Python implementation of the AMQP 0-9-1 protocol that tries to stay fairly independent of the underlying network support library.
- This plugin is tested against RabbitMQ. Other AMQP 0.9.1 protocol based servers may work but not tested/guaranteed.
author: "John Imison (@Im0)"
'''
EXAMPLES = '''
- name: Publish a message to a queue with headers
rabbitmq_publish:
url: "amqp://guest:[email protected]:5672/%2F"
queue: 'test'
body: "Hello world from ansible module rabbitmq_publish"
content_type: "text/plain"
headers:
myHeader: myHeaderValue
- name: Publish a file to a queue
rabbitmq_publish:
url: "amqp://guest:[email protected]:5672/%2F"
queue: 'images'
file: 'path/to/logo.gif'
- name: RabbitMQ auto generated queue
rabbitmq_publish:
url: "amqp://guest:[email protected]:5672/%2F"
body: "Hello world random queue from ansible module rabbitmq_publish"
content_type: "text/plain"
'''
RETURN = '''
result:
description:
- Contains the status I(msg), content type I(content_type) and the queue name I(queue).
returned: success
type: dict
sample: |
'result': { 'content_type': 'text/plain', 'msg': 'Successfully published to queue test', 'queue': 'test' }
'''
try:
import pika
HAS_PIKA = True
except ImportError:
HAS_PIKA = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.rabbitmq import RabbitClient
def main():
argument_spec = RabbitClient.rabbitmq_argument_spec()
argument_spec.update(
exchange=dict(type='str', default=''),
routing_key=dict(type='str', required=False),
body=dict(type='str', required=False),
src=dict(aliases=['file'], type='path', required=False),
content_type=dict(default="text/plain", type='str'),
durable=dict(default=False, type='bool'),
exclusive=dict(default=False, type='bool'),
auto_delete=dict(default=False, type='bool'),
headers=dict(default={}, type='dict')
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['body', 'src']],
supports_check_mode=False
)
rabbitmq = RabbitClient(module)
if rabbitmq.basic_publish():
rabbitmq.close_connection()
module.exit_json(changed=True, result={"msg": "Successfully published to queue %s" % rabbitmq.queue,
"queue": rabbitmq.queue,
"content_type": rabbitmq.content_type})
else:
rabbitmq.close_connection()
module.fail_json(changed=False, msg="Unsuccessful publishing to queue %s" % rabbitmq.queue)
if __name__ == '__main__':
main()
| gpl-3.0 |
googleapis/googleapis-gen | google/logging/v2/logging-v2-py/google/cloud/logging_v2/services/metrics_service_v2/transports/grpc_asyncio.py | 2 | 16399 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.logging_v2.types import logging_metrics
from google.protobuf import empty_pb2 # type: ignore
from .base import MetricsServiceV2Transport, DEFAULT_CLIENT_INFO
from .grpc import MetricsServiceV2GrpcTransport
class MetricsServiceV2GrpcAsyncIOTransport(MetricsServiceV2Transport):
"""gRPC AsyncIO backend transport for MetricsServiceV2.
Service for configuring logs-based metrics.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'logging.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'logging.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def list_log_metrics(self) -> Callable[
[logging_metrics.ListLogMetricsRequest],
Awaitable[logging_metrics.ListLogMetricsResponse]]:
r"""Return a callable for the list log metrics method over gRPC.
Lists logs-based metrics.
Returns:
Callable[[~.ListLogMetricsRequest],
Awaitable[~.ListLogMetricsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_log_metrics' not in self._stubs:
self._stubs['list_log_metrics'] = self.grpc_channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/ListLogMetrics',
request_serializer=logging_metrics.ListLogMetricsRequest.serialize,
response_deserializer=logging_metrics.ListLogMetricsResponse.deserialize,
)
return self._stubs['list_log_metrics']
@property
def get_log_metric(self) -> Callable[
[logging_metrics.GetLogMetricRequest],
Awaitable[logging_metrics.LogMetric]]:
r"""Return a callable for the get log metric method over gRPC.
Gets a logs-based metric.
Returns:
Callable[[~.GetLogMetricRequest],
Awaitable[~.LogMetric]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_log_metric' not in self._stubs:
self._stubs['get_log_metric'] = self.grpc_channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/GetLogMetric',
request_serializer=logging_metrics.GetLogMetricRequest.serialize,
response_deserializer=logging_metrics.LogMetric.deserialize,
)
return self._stubs['get_log_metric']
@property
def create_log_metric(self) -> Callable[
[logging_metrics.CreateLogMetricRequest],
Awaitable[logging_metrics.LogMetric]]:
r"""Return a callable for the create log metric method over gRPC.
Creates a logs-based metric.
Returns:
Callable[[~.CreateLogMetricRequest],
Awaitable[~.LogMetric]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_log_metric' not in self._stubs:
self._stubs['create_log_metric'] = self.grpc_channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/CreateLogMetric',
request_serializer=logging_metrics.CreateLogMetricRequest.serialize,
response_deserializer=logging_metrics.LogMetric.deserialize,
)
return self._stubs['create_log_metric']
@property
def update_log_metric(self) -> Callable[
[logging_metrics.UpdateLogMetricRequest],
Awaitable[logging_metrics.LogMetric]]:
r"""Return a callable for the update log metric method over gRPC.
Creates or updates a logs-based metric.
Returns:
Callable[[~.UpdateLogMetricRequest],
Awaitable[~.LogMetric]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_log_metric' not in self._stubs:
self._stubs['update_log_metric'] = self.grpc_channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/UpdateLogMetric',
request_serializer=logging_metrics.UpdateLogMetricRequest.serialize,
response_deserializer=logging_metrics.LogMetric.deserialize,
)
return self._stubs['update_log_metric']
@property
def delete_log_metric(self) -> Callable[
[logging_metrics.DeleteLogMetricRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete log metric method over gRPC.
Deletes a logs-based metric.
Returns:
Callable[[~.DeleteLogMetricRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_log_metric' not in self._stubs:
self._stubs['delete_log_metric'] = self.grpc_channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/DeleteLogMetric',
request_serializer=logging_metrics.DeleteLogMetricRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_log_metric']
__all__ = (
'MetricsServiceV2GrpcAsyncIOTransport',
)
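# A minimal usage sketch (the project id is hypothetical, and the exact client
# import path is assumed from this package's layout rather than stated here);
# the transport is normally built implicitly, but it can also be injected into
# the async client:
#
#     from google.cloud.logging_v2.services.metrics_service_v2 import (
#         MetricsServiceV2AsyncClient,
#     )
#
#     async def list_metrics():
#         transport = MetricsServiceV2GrpcAsyncIOTransport()       # default host/credentials
#         client = MetricsServiceV2AsyncClient(transport=transport)
#         pager = await client.list_log_metrics(parent="projects/my-project")
#         async for metric in pager:
#             print(metric.name)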
| apache-2.0 |
petosegan/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
jankeromnes/depot_tools | recipes/recipe_util.py | 36 | 1597 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module holds utilities which make writing recipes easier."""
import json
class Recipe(object):
"""Base class for all recipes.
Provides methods that are expected to be overridden by child classes. Also
  provides a command-line parsing method that converts the unified command-line
interface used in depot_tools to the unified python interface defined here."""
@staticmethod
def fetch_spec(_props):
"""Returns instructions to check out the project, conditioned on |props|."""
raise NotImplementedError
@staticmethod
def expected_root(_props):
"""Returns the directory into which the checkout will be performed."""
raise NotImplementedError
def handle_args(self, argv):
"""Passes the command-line arguments through to the appropriate method."""
methods = {'fetch': self.fetch_spec,
'root': self.expected_root}
if len(argv) <= 1 or argv[1] not in methods:
      print 'Must specify a fetch/root action'
return 1
def looks_like_arg(arg):
return arg.startswith('--') and arg.count('=') == 1
bad_parms = [x for x in argv[2:] if not looks_like_arg(x)]
if bad_parms:
print 'Got bad arguments %s' % bad_parms
return 1
method = methods[argv[1]]
props = dict(x.split('=', 1) for x in (y.lstrip('-') for y in argv[2:]))
self.output(method(props))
@staticmethod
def output(data):
print(json.dumps(data))
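# A minimal sketch of a concrete recipe (the project name, URL and the exact
# contents of the fetch spec are illustrative assumptions, not a real
# depot_tools recipe); real recipes subclass Recipe and override both
# static methods:
#
#     class Example(Recipe):
#         """Checks out the hypothetical 'example' project."""
#
#         @staticmethod
#         def fetch_spec(props):
#             return {
#                 'type': 'gclient_git',
#                 'gclient_git_spec': {
#                     'solutions': [{
#                         'name': 'example',
#                         'url': 'https://example.googlesource.com/example.git',
#                     }],
#                 },
#             }
#
#         @staticmethod
#         def expected_root(props):
#             return 'example'
#
#     def main(argv=None):
#         return Example().handle_args(argv)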
| bsd-3-clause |
jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/cgi.py | 52 | 34826 | #! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# XXX Perhaps there should be a slimmed version that doesn't contain
# all those backwards compatible and debugging classes and functions?
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from operator import attrgetter
import sys
import os
import UserDict
import urlparse
from warnings import filterwarnings, catch_warnings, warn
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
filterwarnings("ignore", ".*rfc822 has been removed",
DeprecationWarning)
import mimetools
import rfc822
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
"SvFormContentDict", "InterpFormContentDict", "FormContent",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global logfp, log
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except IOError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
log = initlog # The current logging function
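# A minimal sketch of how the logging hooks above are used from a CGI script
# (the log file path is hypothetical); the first call through cgi.log() opens
# the file and rebinds log to dolog, or to nolog if the file cannot be opened:
#
#     import cgi
#
#     cgi.logfile = '/tmp/cgi.log'      # empty string (the default) disables logging
#     cgi.log("%s: %s", "a", "b")       # writes "a: b" followed by a newline
#     cgi.log("handled request for %s", "index.cgi")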
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError, 'Maximum content length exceeded'
qs = fp.read(clength)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
# parse query string function called from urlparse,
# this is done in order to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urlparse.parse_qs instead",
PendingDeprecationWarning, 2)
return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urlparse.parse_qsl instead",
PendingDeprecationWarning, 2)
return urlparse.parse_qsl(qs, keep_blank_values, strict_parsing)
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
boundary = ""
if 'boundary' in pdict:
boundary = pdict['boundary']
if not valid_boundary(boundary):
raise ValueError, ('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = "--" + boundary
lastpart = "--" + boundary + "--"
partdict = {}
terminator = ""
while terminator != lastpart:
bytes = -1
data = None
if terminator:
# At start of next part. Read headers first.
headers = mimetools.Message(fp)
clength = headers.getheader('content-length')
if clength:
try:
bytes = int(clength)
except ValueError:
pass
if bytes > 0:
if maxlen and bytes > maxlen:
raise ValueError, 'Maximum content length exceeded'
data = fp.read(bytes)
else:
data = ""
# Read lines until end of part.
lines = []
while 1:
line = fp.readline()
if not line:
terminator = lastpart # End outer loop
break
if line[:2] == "--":
terminator = line.strip()
if terminator in (nextpart, lastpart):
break
lines.append(line)
# Done with part.
if data is None:
continue
if bytes < 0:
if lines:
# Strip final line terminator
line = lines[-1]
if line[-2:] == "\r\n":
line = line[:-2]
elif line[-1:] == "\n":
line = line[:-1]
lines[-1] = line
data = "".join(lines)
line = headers['content-disposition']
if not line:
continue
key, params = parse_header(line)
if key != 'form-data':
continue
if 'name' in params:
name = params['name']
else:
continue
if name in partdict:
partdict[name].append(data)
else:
partdict[name] = [data]
return partdict
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.next()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
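# For example (illustrative values; the dict ordering shown is not guaranteed):
#
#     >>> parse_header('text/html; charset=utf-8')
#     ('text/html', {'charset': 'utf-8'})
#     >>> parse_header('form-data; name="files"; filename="report.txt"')
#     ('form-data', {'name': 'files', 'filename': 'report.txt'})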
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
The items are either a Python list (if there's multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
file: the file(-like) object from which you can read the data;
        None if the data is stored as a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes rfc822.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary="",
environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin
(not used when the request method is GET)
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
fp = StringIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
self.fp = fp or sys.stdin
self.headers = headers
self.outerboundary = outerboundary
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
self.innerboundary = ""
if 'boundary' in pdict:
self.innerboundary = pdict['boundary']
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError, 'Maximum content length exceeded'
self.length = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError, name
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError, "not indexable"
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError, key
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if type(value) is type([]):
return map(attrgetter('value'), value)
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if type(value) is type([]):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if type(value) is type([]):
return map(attrgetter('value'), value)
else:
return [value.value]
else:
return []
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError, "not indexable"
return list(set(item.name for item in self.list))
def has_key(self, key):
"""Dictionary style has_key() method."""
if self.list is None:
raise TypeError, "not indexable"
return any(item.name == key for item in self.list)
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError, "not indexable"
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
def __nonzero__(self):
return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if self.qs_on_post:
qs += '&' + self.qs_on_post
self.list = list = []
for key, value in urlparse.parse_qsl(qs, self.keep_blank_values,
self.strict_parsing):
list.append(MiniFieldStorage(key, value))
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError, 'Invalid boundary in multipart form: %r' % (ib,)
self.list = []
if self.qs_on_post:
for key, value in urlparse.parse_qsl(self.qs_on_post,
self.keep_blank_values, self.strict_parsing):
self.list.append(MiniFieldStorage(key, value))
FieldStorageClass = None
klass = self.FieldStorageClass or self.__class__
part = klass(self.fp, {}, ib,
environ, keep_blank_values, strict_parsing)
# Throw first part away
while not part.done:
headers = rfc822.Message(self.fp)
part = klass(self.fp, headers, ib,
environ, keep_blank_values, strict_parsing)
self.list.append(part)
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file('b')
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize))
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
self.file = self.__file = StringIO()
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file('')
self.file.write(self.__file.getvalue())
self.__file = None
self.file.write(line)
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary."""
next = "--" + self.outerboundary
last = next + "--"
delim = ""
last_line_lfend = True
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
if delim == "\r":
line = delim + line
delim = ""
if line[:2] == "--" and last_line_lfend:
strippedline = line.strip()
if strippedline == next:
break
if strippedline == last:
self.done = 1
break
odelim = delim
if line[-2:] == "\r\n":
delim = "\r\n"
line = line[:-2]
last_line_lfend = True
elif line[-1] == "\n":
delim = "\n"
line = line[:-1]
last_line_lfend = True
elif line[-1] == "\r":
# We may interrupt \r\n sequences if they span the 2**16
# byte boundary
delim = "\r"
line = line[:-1]
last_line_lfend = False
else:
delim = ""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next = "--" + self.outerboundary
last = next + "--"
last_line_lfend = True
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
if line[:2] == "--" and last_line_lfend:
strippedline = line.strip()
if strippedline == next:
break
if strippedline == last:
self.done = 1
break
last_line_lfend = line.endswith('\n')
def make_file(self, binary=None):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The 'binary' argument is unused -- the file is always opened
in binary mode.
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
import tempfile
return tempfile.TemporaryFile("w+b")
# Backwards Compatibility Classes
# ===============================
class FormContentDict(UserDict.UserDict):
"""Form content as dictionary with a list of values per field.
form = FormContentDict()
form[key] -> [value, value, ...]
key in form -> Boolean
form.keys() -> [key, key, ...]
form.values() -> [[val, val, ...], [val, val, ...], ...]
form.items() -> [(key, [val, val, ...]), (key, [val, val, ...]), ...]
form.dict == {key: [val, val, ...], ...}
"""
def __init__(self, environ=os.environ, keep_blank_values=0, strict_parsing=0):
self.dict = self.data = parse(environ=environ,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing)
self.query_string = environ['QUERY_STRING']
class SvFormContentDict(FormContentDict):
"""Form content as dictionary expecting a single value per field.
If you only expect a single value for each field, then form[key]
will return that single value. It will raise an IndexError if
that expectation is not true. If you expect a field to have
    possible multiple values, then you can use form.getlist(key) to
get all of the values. values() and items() are a compromise:
they return single strings where there is a single value, and
lists of strings otherwise.
"""
def __getitem__(self, key):
if len(self.dict[key]) > 1:
raise IndexError, 'expecting a single value'
return self.dict[key][0]
def getlist(self, key):
return self.dict[key]
def values(self):
result = []
for value in self.dict.values():
if len(value) == 1:
result.append(value[0])
else: result.append(value)
return result
def items(self):
result = []
for key, value in self.dict.items():
if len(value) == 1:
result.append((key, value[0]))
else: result.append((key, value))
return result
class InterpFormContentDict(SvFormContentDict):
"""This class is present for backwards compatibility only."""
def __getitem__(self, key):
v = SvFormContentDict.__getitem__(self, key)
if v[0] in '0123456789+-.':
try: return int(v)
except ValueError:
try: return float(v)
except ValueError: pass
return v.strip()
def values(self):
result = []
for key in self.keys():
try:
result.append(self[key])
except IndexError:
result.append(self.dict[key])
return result
def items(self):
result = []
for key in self.keys():
try:
result.append((key, self[key]))
except IndexError:
result.append((key, self.dict[key]))
return result
class FormContent(FormContentDict):
"""This class is present for backwards compatibility only."""
def values(self, key):
if key in self.dict :return self.dict[key]
else: return None
def indexed_value(self, key, location):
if key in self.dict:
if len(self.dict[key]) > location:
return self.dict[key][location]
else: return None
else: return None
def value(self, key):
if key in self.dict: return self.dict[key][0]
else: return None
def length(self, key):
return len(self.dict[key])
def stripped(self, key):
if key in self.dict: return self.dict[key][0].strip()
else: return None
def pars(self):
return self.dict
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print "Content-type: text/html"
print
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec "testing print_exception() -- <I>italics?</I>"
def g(f=f):
f()
print "<H3>What follows is a test, not an actual exception:</H3>"
g()
except:
print_exception()
print "<H1>Second try with a small maxlen...</H1>"
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print
print "<H3>Traceback (most recent call last):</H3>"
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print "<PRE>%s<B>%s</B></PRE>" % (
escape("".join(list[:-1])),
escape(list[-1]),
)
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = environ.keys()
keys.sort()
print
print "<H3>Shell Environment:</H3>"
print "<DL>"
for key in keys:
print "<DT>", escape(key), "<DD>", escape(environ[key])
print "</DL>"
print
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = form.keys()
keys.sort()
print
print "<H3>Form Contents:</H3>"
if not keys:
print "<P>No form fields."
print "<DL>"
for key in keys:
print "<DT>" + escape(key) + ":",
value = form[key]
print "<i>" + escape(repr(type(value))) + "</i>"
print "<DD>" + escape(repr(value))
print "</DL>"
print
def print_directory():
"""Dump the current directory as HTML."""
print
print "<H3>Current Working Directory:</H3>"
try:
pwd = os.getcwd()
except os.error, msg:
print "os.error:", escape(str(msg))
else:
print escape(pwd)
print
def print_arguments():
print
print "<H3>Command Line Arguments:</H3>"
print
print sys.argv
print
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print """
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
"""
# Utilities
# =========
def escape(s, quote=None):
'''Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true, the quotation mark character (")
is also translated.'''
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
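# Illustrative example (not part of the module): escape() substitutes HTML
# entities, handling "&" first so later replacements are not double-escaped.
def _escape_example():
    # Expected result: '&lt;b&gt;&quot;5 &amp; 6&quot;&lt;/b&gt;'
    return escape('<b>"5 & 6"</b>', quote=True)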
def valid_boundary(s, _vb_pattern="^[ -~]{0,200}[!-~]$"):
import re
return re.match(_vb_pattern, s)
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
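# Illustrative sketch (not part of the module): a minimal CGI script built on
# FieldStorage, mirroring the access patterns exercised by test() above. The
# field names 'name' and 'colour' are placeholders.
def example_cgi_script():
    form = FieldStorage()               # parses QUERY_STRING or the POSTed body
    name = form.getfirst('name', 'world')
    colours = form.getlist('colour')    # always a list, possibly empty
    print "Content-type: text/html"
    print
    print "<P>Hello, " + escape(name)
    print "<P>You picked %d colour(s)." % len(colours)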
| mit |
koparasy/faultinjection-gem5 | src/arch/x86/isa/insts/x87/arithmetic/addition.py | 29 | 2165 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# FADD
# FADDP
# FIADD
'''
| bsd-3-clause |
AlexCaranha/Wox | PythonHome/Lib/site-packages/requests/adapters.py | 293 | 14608 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed connections and
timeouts, never to requests where the server returns a response.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
"""Initializes a urllib3 PoolManager. This method should not be called
from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block)
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_headers = self.proxy_headers(proxy)
if not proxy in self.proxy_manager:
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block)
conn = self.proxy_manager[proxy].connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
        If the message is being sent through an HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
scheme = urlparse(request.url).scheme
proxy = proxies.get(scheme)
if proxy and scheme != 'https':
url, _ = urldefrag(request.url)
else:
url = request.path_url
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:param kwargs: Optional additional keyword arguments.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) The timeout on the request.
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
else:
# All is well, return the connection to the pool.
conn._put_conn(low_conn)
except socket.error as sockerr:
raise ConnectionError(sockerr, request=request)
except MaxRetryError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, TimeoutError):
raise Timeout(e, request=request)
else:
raise
return self.build_response(request, resp)
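# Illustrative usage sketch (not part of this module): mounting HTTPAdapter on a
# Session with non-default pooling and retry settings. The numbers are arbitrary
# example values; requests is imported lazily to avoid a circular import here.
def example_mounted_session():
    import requests
    session = requests.Session()
    adapter = HTTPAdapter(pool_connections=20, pool_maxsize=50, max_retries=3)
    # Requests to URLs starting with these prefixes are routed through `adapter`.
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session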
| mit |
okuoku/ninja | bootstrap.py | 1 | 4690 | #!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import OptionParser
import sys
import os
import glob
import errno
import shlex
import subprocess
os.chdir(os.path.dirname(os.path.abspath(__file__)))
parser = OptionParser()
parser.add_option('--verbose', action='store_true',
help='enable verbose build',)
parser.add_option('--x64', action='store_true',
help='force 64-bit build (Windows)',)
(options, conf_args) = parser.parse_args()
def run(*args, **kwargs):
returncode = subprocess.call(*args, **kwargs)
if returncode != 0:
sys.exit(returncode)
# Compute system-specific CFLAGS/LDFLAGS as used both in the g++ call below
# and in the later configure.py.
cflags = os.environ.get('CFLAGS', '').split()
ldflags = os.environ.get('LDFLAGS', '').split()
if sys.platform.startswith('freebsd'):
cflags.append('-I/usr/local/include')
ldflags.append('-L/usr/local/lib')
print 'Building ninja manually...'
try:
os.mkdir('build')
except OSError, e:
if e.errno != errno.EEXIST:
raise
sources = []
for src in glob.glob('src/*.cc'):
if src.endswith('test.cc') or src.endswith('.in.cc'):
continue
if src.endswith('bench.cc'):
continue
filename = os.path.basename(src)
if filename == 'browse.cc': # Depends on generated header.
continue
if sys.platform.startswith('win32'):
if src.endswith('-posix.cc'):
continue
else:
if src.endswith('-win32.cc'):
continue
if '_main' in src:
continue
sources.append(src)
if sys.platform.startswith('linux'):
sources.append('src/clockgettime-linux.c')
if sys.platform.startswith('win32'):
sources.append('src/getopt.c')
vcdir = os.environ.get('VCINSTALLDIR')
if vcdir:
if options.x64:
cl = [os.path.join(vcdir, 'bin', 'amd64', 'cl.exe')]
else:
cl = [os.path.join(vcdir, 'bin', 'cl.exe')]
args = cl + ['/nologo', '/EHsc', '/DNOMINMAX']
else:
args = shlex.split(os.environ.get('CXX', 'g++'))
cflags.extend(['-Wno-deprecated',
'-DNINJA_PYTHON="' + sys.executable + '"',
'-DNINJA_BOOTSTRAP'])
if sys.platform.startswith('win32'):
cflags.append('-D_WIN32_WINNT=0x0501')
if options.x64:
cflags.append('-m64')
args.extend(cflags)
args.extend(ldflags)
binary = 'ninja.bootstrap'
if sys.platform.startswith('win32'):
binary = 'ninja.bootstrap.exe'
args.extend(sources)
if vcdir:
args.extend(['/link', '/out:' + binary])
else:
args.extend(['-o', binary])
if options.verbose:
print ' '.join(args)
run(args)
verbose = []
if options.verbose:
verbose = ['-v']
if sys.platform.startswith('win32'):
# Build ninja-msvc-helper using ninja without an msvc-helper.
print 'Building ninja-msvc-helper...'
run([sys.executable, 'configure.py', '--with-msvc-helper='] + conf_args)
run(['./' + binary] + verbose + ['ninja-msvc-helper'])
# Rename the helper to the same name + .bootstrap.
helper_binary = 'ninja-msvc-helper.bootstrap.exe'
try:
os.unlink(helper_binary)
except:
pass
os.rename('ninja-msvc-helper.exe', helper_binary)
# Build ninja using the newly-built msvc-helper.
print 'Building ninja using itself...'
run([sys.executable, 'configure.py',
'--with-msvc-helper=%s' % helper_binary] + conf_args)
run(['./' + binary] + verbose)
# Clean up.
for obj in glob.glob('*.obj'):
os.unlink(obj)
print """
Done!
Note: to work around Windows file locking, where you can't rebuild an
in-use binary, to run ninja after making any changes to build ninja itself
you should run ninja.bootstrap instead. Your build is also configured to
use ninja-msvc-helper.bootstrap.exe instead of the ninja-msvc-helper.exe
that it builds; see the --help output of configure.py."""
else:
print 'Building ninja using itself...'
run([sys.executable, 'configure.py'] + conf_args)
run(['./' + binary] + verbose)
    #NOTE: './ninja -t clean' removes the bootstrap binary itself! os.unlink(binary)
print 'Done!'
| apache-2.0 |
3rdcycle/pyqtgraph | pyqtgraph/python2_3.py | 14 | 1726 | """
Helper functions that smooth out the differences between python 2 and 3.
"""
import sys
def asUnicode(x):
if sys.version_info[0] == 2:
if isinstance(x, unicode):
return x
elif isinstance(x, str):
return x.decode('UTF-8')
else:
return unicode(x)
else:
return str(x)
def cmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
def sortList(l, cmpFunc):
if sys.version_info[0] == 2:
l.sort(cmpFunc)
else:
l.sort(key=cmpToKey(cmpFunc))
if sys.version_info[0] == 3:
import builtins
builtins.basestring = str
#builtins.asUnicode = asUnicode
#builtins.sortList = sortList
basestring = str
def cmp(a,b):
if a>b:
return 1
elif b > a:
return -1
else:
return 0
builtins.cmp = cmp
builtins.xrange = range
#else: ## don't use __builtin__ -- this confuses things like pyshell and ActiveState's lazy import recipe
#import __builtin__
#__builtin__.asUnicode = asUnicode
#__builtin__.sortList = sortList
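# Illustrative usage sketch (not part of the original module): an old-style cmp
# function driving a sort through sortList()/cmpToKey() on either Python version.
if __name__ == '__main__':
    def _by_absolute_value(a, b):
        return cmp(abs(a), abs(b))
    _values = [3, -7, 1, -2]
    sortList(_values, _by_absolute_value)
    assert _values == [1, -2, 3, -7]
    print(asUnicode(_values))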
| mit |
salomon1184/bite-project | deps/gdata-python-client/samples/analytics/data_feed_demo.py | 23 | 6246 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample Google Analytics Data Export API Data Feed application.
This sample demonstrates how to make requests and retrieve the important
information from the Google Analytics Data Export API Data Feed. This
sample requires a Google Analytics username and password and uses the
Client Login authorization routine.
Class DataFeedDemo: Prints all the important Data Feed information.
"""
__author__ = '[email protected] (Nick Mihailovski)'
import gdata.analytics.client
import gdata.sample_util
def main():
"""Main function for the sample."""
demo = DataFeedDemo()
demo.PrintFeedDetails()
demo.PrintDataSources()
demo.PrintFeedAggregates()
demo.PrintSegmentInfo()
demo.PrintOneEntry()
demo.PrintFeedTable()
class DataFeedDemo(object):
"""Gets data from the Data Feed.
Attributes:
data_feed: Google Analytics AccountList returned form the API.
"""
def __init__(self):
"""Inits DataFeedDemo."""
SOURCE_APP_NAME = 'Google-dataFeedDemoPython-v2'
my_client = gdata.analytics.client.AnalyticsClient(source=SOURCE_APP_NAME)
try:
gdata.sample_util.authorize_client(
my_client,
service=my_client.auth_service,
source=SOURCE_APP_NAME,
scopes=['https://www.google.com/analytics/feeds/'])
except gdata.client.BadAuthentication:
exit('Invalid user credentials given.')
except gdata.client.Error:
exit('Login Error')
table_id = gdata.sample_util.get_param(
name='table_id',
prompt='Please enter your Google Analytics Table id (format ga:xxxx)')
# DataFeedQuery simplifies constructing API queries and uri encodes params.
data_query = gdata.analytics.client.DataFeedQuery({
'ids': table_id,
'start-date': '2008-10-01',
'end-date': '2008-10-30',
'dimensions': 'ga:source,ga:medium',
'metrics': 'ga:visits',
'sort': '-ga:visits',
'filters': 'ga:medium==referral',
'max-results': '50'})
self.feed = my_client.GetDataFeed(data_query)
def PrintFeedDetails(self):
"""Prints important Analytics related data found at the top of the feed."""
print '\n-------- Feed Data --------'
print 'Feed Title = ' + self.feed.title.text
print 'Feed Id = ' + self.feed.id.text
print 'Total Results Found = ' + self.feed.total_results.text
print 'Start Index = ' + self.feed.start_index.text
print 'Results Returned = ' + self.feed.items_per_page.text
print 'Start Date = ' + self.feed.start_date.text
print 'End Date = ' + self.feed.end_date.text
print 'Has Sampled Data = ' + str(self.feed.HasSampledData())
def PrintDataSources(self):
"""Prints data found in the data source elements.
This data has information about the Google Analytics account the referenced
table ID belongs to. Note there is currently exactly one data source in
the data feed.
"""
data_source = self.feed.data_source[0]
print '\n-------- Data Source Data --------'
print 'Table ID = ' + data_source.table_id.text
print 'Table Name = ' + data_source.table_name.text
print 'Web Property Id = ' + data_source.GetProperty('ga:webPropertyId').value
print 'Profile Id = ' + data_source.GetProperty('ga:profileId').value
print 'Account Name = ' + data_source.GetProperty('ga:accountName').value
def PrintFeedAggregates(self):
"""Prints data found in the aggregates elements.
This contains the sum of all the metrics defined in the query across.
This sum spans all the rows matched in the feed.total_results property
and not just the rows returned by the response.
"""
aggregates = self.feed.aggregates
print '\n-------- Metric Aggregates --------'
for met in aggregates.metric:
print ''
print 'Metric Name = ' + met.name
print 'Metric Value = ' + met.value
print 'Metric Type = ' + met.type
print 'Metric CI = ' + met.confidence_interval
def PrintSegmentInfo(self):
"""Prints segment information if the query has advanced segments
defined."""
print '-------- Advanced Segments Information --------'
    if self.feed.segment:
      segment = self.feed.segment
      if segment.name:
        print 'Segment Name = ' + str(segment.name)
      if segment.id:
        print 'Segment Id = ' + str(segment.id)
      print 'Segment Definition = ' + segment.definition.text
else:
print 'No segments defined'
def PrintOneEntry(self):
"""Prints all the important Google Analytics data found in an entry"""
print '\n-------- One Entry --------'
if len(self.feed.entry) == 0:
print 'No entries found'
return
entry = self.feed.entry[0]
print 'ID = ' + entry.id.text
for dim in entry.dimension:
print 'Dimension Name = ' + dim.name
print 'Dimension Value = ' + dim.value
for met in entry.metric:
print 'Metric Name = ' + met.name
print 'Metric Value = ' + met.value
print 'Metric Type = ' + met.type
print 'Metric CI = ' + met.confidence_interval
def PrintFeedTable(self):
"""Prints all the entries as a table."""
print '\n-------- All Entries In a Table --------'
for entry in self.feed.entry:
for dim in entry.dimension:
print ('Dimension Name = %s \t Dimension Value = %s'
% (dim.name, dim.value))
for met in entry.metric:
print ('Metric Name = %s \t Metric Value = %s'
% (met.name, met.value))
print '---'
if __name__ == '__main__':
main()
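# Illustrative sketch (not part of the original sample): flattening the same feed
# into plain dictionaries (e.g. for CSV export), using the dimension/metric
# attributes already shown in PrintFeedTable() above.
def feed_to_rows(feed):
    rows = []
    for entry in feed.entry:
        row = {}
        for dim in entry.dimension:
            row[dim.name] = dim.value
        for met in entry.metric:
            row[met.name] = met.value
        rows.append(row)
    return rows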
| apache-2.0 |
miciux/telegram-bot-admin | torrenthandler.py | 1 | 5064 | import logging
import abstracthandler
import transmissionrpc
import os
class TorrentHandler(abstracthandler.AbstractHandler):
def __init__(self, conf, bot):
abstracthandler.AbstractHandler.__init__(self, 'torrent', conf, bot)
self.log = logging.getLogger(__name__)
self.tc = None
self.commands={}
self.commands['list'] = self.get_torrent_list
self.commands['info'] = self.get_torrent_info
self.commands['add'] = self.add_torrent
self.commands['remove'] = self.remove_torrent
self.commands['stop'] = self.stop_torrent
self.commands['start'] = self.start_torrent
self.commands['stopall'] = self.stop_all
self.commands['startall'] = self.start_all
self.commands['reload'] = self.reload
def reload(self, cid, args):
os.system('sudo service transmission-daemon reload')
def connect(self):
self.tc = transmissionrpc.Client('localhost', port=9091)
def handle_message(self,cid, command, args):
try:
self.commands[command](cid,args)
except Exception as e:
self.send_formatted_message(cid,self.get_sorry_message())
self.log.error(e)
def get_torrent_list(self, cid, args):
if not self.tc:
self.connect()
message = 'Lista torrent:'
tlist = self.tc.get_torrents()
for torrent in tlist:
message = '%s\nid:*%s) %s %s*' % (message, torrent.id, torrent.name, torrent.status)
message = '%s\n' % message
self.send_formatted_message(cid, message)
def get_torrent_info(self, cid, args):
if not self.tc:
self.connect()
if len(args) >= 1:
for torrent in args:
data = self.get_formatted_torrent_data(torrent)
self.send_formatted_message(cid, data)
else:
self.send_formatted_message(cid,
                '*torrent info* usage: torrent info _[TORRENT NUMBER]_...')
def remove_torrent(self, cid, args):
if not self.tc:
self.connect()
if len(args) >= 1:
for torrent in args:
data = self.remove_single_torrent(torrent)
self.send_formatted_message(cid, data)
else:
self.send_formatted_message(cid,
'*torrent remove* usage: torrent remove _[TORRENT NUMBER]_...')
def remove_single_torrent(self, torrent):
        result = 'Removed torrent with id=*%s*'
        self.tc.remove_torrent(torrent)
        result = result % torrent
        return result
def start_torrent(self, cid, args):
if not self.tc:
self.connect()
if len(args) >= 1:
for torrent in args:
data = self.start_single_torrent(torrent)
self.send_formatted_message(cid, data)
else:
self.send_formatted_message(cid,
'*torrent start* usage: torrent start _[TORRENT NUMBER]_...')
def start_single_torrent(self, torrent):
result = 'Started torrent with id=*%s*'
self.tc.start_torrent(torrent)
result = result % torrent
return result
def stop_torrent(self, cid, args):
if not self.tc:
self.connect()
if len(args) >= 1:
for torrent in args:
data = self.stop_single_torrent(torrent)
self.send_formatted_message(cid, data)
else:
self.send_formatted_message(cid,
'*torrent stop* usage: torrent stop _[TORRENT NUMBER]_...')
def stop_single_torrent(self, torrent):
result = 'Stopped torrent with id=*%s*'
self.tc.stop_torrent(torrent)
result = result % torrent
return result
def add_torrent(self, cid, args):
if not self.tc:
self.connect()
if len(args) >= 1:
for torrent in args:
data = self.add_single_torrent(torrent)
self.send_formatted_message(cid, data)
else:
self.send_formatted_message(cid,
                '*torrent add* usage: torrent add _[TORRENT FILE URI]_...')
def add_single_torrent(self, torrent):
result = 'Added *%s* with id=*%s*'
t = self.tc.add_torrent(torrent);
result = result % (t.name,t.id)
return result
def get_formatted_torrent_data(self, torrent):
        result = 'id:*%s* name:*%s* status:*%s* progress:*%d%%*'
t = self.tc.get_torrent(int(torrent))
result = result % (t.id,t.name,t.status,int(t.progress))
return result
def start_all(self,cid,args):
if not self.tc:
self.connect()
self.tc.start_all();
self.send_formatted_message(cid,'Started *all* torrents')
def stop_all(self,cid,args):
if not self.tc:
self.connect()
self.tc.stop_all();
self.send_formatted_message(cid,'Stopped *all* torrents')
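# Illustrative usage sketch (not part of the handler): how the command table set
# up in __init__ routes incoming messages. The chat id and torrent URL below are
# placeholders, and `handler` is assumed to be a fully constructed TorrentHandler.
def _example_dispatch(handler, chat_id=42):
    handler.handle_message(chat_id, 'list', [])        # list all torrents
    handler.handle_message(chat_id, 'info', ['3'])     # details for torrent id 3
    handler.handle_message(chat_id, 'add', ['http://example.com/file.torrent'])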
| mit |
mcardillo55/django | django/contrib/auth/urls.py | 568 | 1036 | # The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test purposes.
# It is also provided as a convenience to those who want to deploy these URLs
# elsewhere.
from django.conf.urls import url
from django.contrib.auth import views
urlpatterns = [
url(r'^login/$', views.login, name='login'),
url(r'^logout/$', views.logout, name='logout'),
url(r'^password_change/$', views.password_change, name='password_change'),
url(r'^password_change/done/$', views.password_change_done, name='password_change_done'),
url(r'^password_reset/$', views.password_reset, name='password_reset'),
url(r'^password_reset/done/$', views.password_reset_done, name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.password_reset_confirm, name='password_reset_confirm'),
url(r'^reset/done/$', views.password_reset_complete, name='password_reset_complete'),
]
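# Illustrative only (not part of Django itself): a project-level URLconf would
# typically mount these views under a prefix. A minimal sketch, assuming the
# project wants them under /accounts/; the named patterns above ('login',
# 'password_reset', ...) can then be reversed as usual.
def example_project_urlpatterns():
    from django.conf.urls import include, url
    return [
        url(r'^accounts/', include('django.contrib.auth.urls')),
    ]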
| bsd-3-clause |
yunxliu/crosswalk-test-suite | webapi/tct-mediacapture-w3c-tests/inst.apk.py | 1996 | 3186 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
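# Hypothetical helper (added for illustration, not called by this script): shows
# how doCMD's (return_code, output) contract can be consumed, mirroring the
# device-detection logic in main() below.
def exampleListDevices():
    (return_code, output) = doCMD("%s devices" % ADB_CMD)
    if return_code not in (None, 0):
        return []
    return [line.split("\t")[0] for line in output
            if str.find(line, "\tdevice") != -1]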
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s uninstall org.xwalk.%s" % (
ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause |
sgenoud/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 1 | 14689 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
"""Check classification on a toy dataset."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
"""Check input parameter validation."""
assert_raises(ValueError, GradientBoostingClassifier, n_estimators=0)
assert_raises(ValueError, GradientBoostingClassifier, n_estimators=-1)
assert_raises(ValueError, GradientBoostingClassifier, learn_rate=0.0)
assert_raises(ValueError, GradientBoostingClassifier, learn_rate=-1.0)
assert_raises(ValueError, GradientBoostingRegressor, loss='foobar')
assert_raises(ValueError, GradientBoostingClassifier,
min_samples_split=0.0)
assert_raises(ValueError, GradientBoostingClassifier,
min_samples_split=-1.0)
assert_raises(ValueError, GradientBoostingClassifier, min_samples_leaf=0)
assert_raises(ValueError, GradientBoostingClassifier, min_samples_leaf=-1.)
assert_raises(ValueError, GradientBoostingClassifier, subsample=0.0)
assert_raises(ValueError, GradientBoostingClassifier, subsample=1.1)
assert_raises(ValueError, GradientBoostingClassifier, subsample=-0.1)
assert_raises(ValueError, GradientBoostingClassifier, max_depth=-0.1)
assert_raises(ValueError, GradientBoostingClassifier, max_depth=0)
assert_raises(ValueError, GradientBoostingClassifier, init={})
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# binomial deviance requires ``n_classes == 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='bdeviance').fit(X, y),
X, [0, 0, 1, 1, 2, 2])
# multinomial deviance requires ``n_classes > 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='mdeviance').fit(X, y),
X, [0, 0, 1, 1, 1, 0])
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_classification_synthetic():
"""Test GradientBoostingClassifier on synthetic dataset used by
Hastie et al. in ESLII Example 12.7. """
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1,
learn_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.085, \
"GB failed with error %.4f" % error_rate
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learn_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, \
"Stochastic GB failed with error %.4f" % error_rate
def test_boston():
"""Check consistency on dataset boston house prices with least squares
and least absolute deviation. """
for loss in ("ls", "lad", "huber"):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4,
min_samples_split=1, random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and mse = %.4f" % (loss, mse)
def test_iris():
"""Check consistency on dataset iris."""
for subsample in (1.0, 0.5):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
"""Test on synthetic regression datasets used in Leo Breiman,
    `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996). """
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learn_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
min_samples_split=1, random_state=1)
clf.fit(boston.data, boston.target)
feature_importances = clf.feature_importances_
# true feature importance ranking
true_ranking = np.array([3, 1, 8, 10, 2, 9, 4, 11, 0, 6, 7, 5, 12])
assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability():
"""Predict probabilities."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
"""Test input checks (shape and type of X and y)."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(ValueError, clf.predict, X_sparse)
def test_check_inputs_predict():
"""X has wrong shape """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
"""test if max_features is valid. """
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
def test_staged_predict():
"""Test whether staged decision function eventually gives
the same prediction.
"""
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_serialization():
"""Check model serialization."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
"""Check if we can fit even though all targets are equal. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
"""Check if quantile loss with alpha=0.5 equals lad. """
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
"""Test with non-integer class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = map(str, y)
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), map(str, true_result))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
"""Test with float class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T), np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
"""Test with float class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
"""Test with different memory layouts of X and y"""
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
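# Illustrative sketch (not one of the test cases above): the plain
# fit/predict/staged_predict workflow that these tests exercise. Parameter
# values are arbitrary examples; the function is never called by the test runner.
def example_basic_workflow():
    clf = GradientBoostingRegressor(n_estimators=50, max_depth=3, random_state=0)
    clf.fit(boston.data, boston.target)
    y_pred = clf.predict(boston.data)
    # staged_predict yields one prediction array per boosting stage; the last
    # stage matches predict(), as asserted in test_staged_predict above.
    staged = list(clf.staged_predict(boston.data))
    assert_array_equal(staged[-1], y_pred)
    return mean_squared_error(boston.target, y_pred)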
| bsd-3-clause |
brokenjacobs/ansible | lib/ansible/modules/remote_management/hpilo/hpilo_facts.py | 33 | 7797 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: hpilo_facts
version_added: "2.3"
author: Dag Wieers (@dagwieers)
short_description: Gather facts through an HP iLO interface
description:
- This module gathers facts for a specific system using its HP iLO interface.
These facts include hardware and network related information useful
for provisioning (e.g. macaddress, uuid).
- This module requires the hpilo python module.
options:
host:
description:
- The HP iLO hostname/address that is linked to the physical system.
required: true
login:
description:
- The login name to authenticate to the HP iLO interface.
default: Administrator
password:
description:
- The password to authenticate to the HP iLO interface.
default: admin
requirements:
- hpilo
notes:
- This module ought to be run from a system that can access the HP iLO
interface directly, either by using C(local_action) or using C(delegate_to).
'''
EXAMPLES = r'''
# Task to gather facts from a HP iLO interface only if the system is an HP server
- hpilo_facts:
host: YOUR_ILO_ADDRESS
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
when: cmdb_hwmodel.startswith('HP ')
delegate_to: localhost
- fail:
msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ hw_system_serial }}) !'
when: cmdb_serialno != hw_system_serial
'''
RETURN = r'''
# Typical output of HP iLO_facts for a physical system
hw_bios_date:
description: BIOS date
returned: always
type: string
sample: 05/05/2011
hw_bios_version:
description: BIOS version
returned: always
type: string
sample: P68
hw_ethX:
description: Interface information (for each interface)
returned: always
type: dictionary
sample:
- macaddress: 00:11:22:33:44:55
macaddress_dash: 00-11-22-33-44-55
hw_eth_ilo:
description: Interface information (for the iLO network interface)
returned: always
type: dictionary
sample:
- macaddress: 00:11:22:33:44:BA
- macaddress_dash: 00-11-22-33-44-BA
hw_product_name:
description: Product name
returned: always
type: string
sample: ProLiant DL360 G7
hw_product_uuid:
description: Product UUID
returned: always
type: string
sample: ef50bac8-2845-40ff-81d9-675315501dac
hw_system_serial:
description: System serial number
returned: always
type: string
sample: ABC12345D6
hw_uuid:
description: Hardware UUID
returned: always
type: string
sample: 123456ABC78901D2
'''
import re
import warnings
from ansible.module_utils.basic import AnsibleModule
try:
import hpilo
HAS_HPILO = True
except ImportError:
HAS_HPILO = False
# Suppress warnings from hpilo
warnings.simplefilter('ignore')
def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
    """Return a (fact name, facts dict) tuple for a flat NIC entry."""
    try:
        factname = 'hw_eth' + str(int(entry['Port']) - 1)
    except (KeyError, ValueError, TypeError):
        factname = non_numeric
    facts = {
        'macaddress': entry['MAC'].replace('-', ':'),
        'macaddress_dash': entry['MAC']
    }
    return (factname, facts)
def main():
module = AnsibleModule(
argument_spec = dict(
host = dict(required=True, type='str'),
login = dict(default='Administrator', type='str'),
password = dict(default='admin', type='str', no_log=True),
),
supports_check_mode=True,
)
if not HAS_HPILO:
module.fail_json(msg='The hpilo python module is required')
host = module.params['host']
login = module.params['login']
password = module.params['password']
ilo = hpilo.Ilo(host, login=login, password=password)
facts = {
'module_hw': True,
}
# TODO: Count number of CPUs, DIMMs and total memory
data = ilo.get_host_data()
for entry in data:
if 'type' not in entry:
continue
elif entry['type'] == 0: # BIOS Information
facts['hw_bios_version'] = entry['Family']
facts['hw_bios_date'] = entry['Date']
elif entry['type'] == 1: # System Information
facts['hw_uuid'] = entry['UUID']
facts['hw_system_serial'] = entry['Serial Number'].rstrip()
facts['hw_product_name'] = entry['Product Name']
facts['hw_product_uuid'] = entry['cUUID']
elif entry['type'] == 209: # Embedded NIC MAC Assignment
if 'fields' in entry:
for (name, value) in [ (e['name'], e['value']) for e in entry['fields'] ]:
if name.startswith('Port'):
try:
factname = 'hw_eth' + str(int(value) - 1)
except:
factname = 'hw_eth_ilo'
elif name.startswith('MAC'):
facts[factname] = {
'macaddress': value.replace('-', ':'),
'macaddress_dash': value
}
else:
(factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
facts[factname] = entry_facts
        elif entry['type'] == 209:  # HPQ NIC iSCSI MAC Info (note: unreachable, the branch above already handles type 209)
for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
if name.startswith('Port'):
try:
factname = 'hw_iscsi' + str(int(value) - 1)
except:
factname = 'hw_iscsi_ilo'
elif name.startswith('MAC'):
facts[factname] = {
'macaddress': value.replace('-', ':'),
'macaddress_dash': value
}
elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
(factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
facts[factname] = entry_facts
# Collect health (RAM/CPU data)
health = ilo.get_embedded_health()
facts['hw_health'] = health
memory_details_summary = health.get('memory', {}).get('memory_details_summary')
# RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
if memory_details_summary:
facts['hw_memory_details_summary'] = memory_details_summary
facts['hw_memory_total'] = 0
for cpu, details in memory_details_summary.items():
cpu_total_memory_size = details.get('total_memory_size')
if cpu_total_memory_size:
ram = re.search('(\d+)\s+(\w+)', cpu_total_memory_size)
if ram:
if ram.group(2) == 'GB':
facts['hw_memory_total'] = facts['hw_memory_total'] + int(ram.group(1))
# reformat into a text friendly format
facts['hw_memory_total'] = "{0} GB".format(facts['hw_memory_total'])
module.exit_json(ansible_facts=facts)
if __name__ == '__main__':
main()
| gpl-3.0 |
40023154/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/socket.py | 730 | 14913 | # Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
class socket(_socket.socket):
"""A subclass of _socket.socket adding the makefile() method."""
__slots__ = ["__weakref__", "_io_refs", "_closed"]
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
_socket.socket.__init__(self, family, type, proto, fileno)
self._io_refs = 0
self._closed = False
def __enter__(self):
return self
def __exit__(self, *args):
if not self._closed:
self.close()
def __repr__(self):
"""Wrap __repr__() to reveal the real class name."""
s = _socket.socket.__repr__(self)
if s.startswith("<socket object"):
s = "<%s.%s%s%s" % (self.__class__.__module__,
self.__class__.__name__,
getattr(self, '_closed', False) and " [closed] " or "",
s[7:])
return s
def __getstate__(self):
raise TypeError("Cannot serialize socket object")
def dup(self):
"""dup() -> socket object
Return a new socket object connected to the same system resource.
"""
fd = dup(self.fileno())
sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
sock.settimeout(self.gettimeout())
return sock
def accept(self):
"""accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket
representing the connection, and the address of the client.
For IP sockets, the address info is a pair (hostaddr, port).
"""
fd, addr = self._accept()
sock = socket(self.family, self.type, self.proto, fileno=fd)
# Issue #7995: if no default timeout is set and the listening
# socket had a (non-zero) timeout, force the new socket in blocking
# mode to override platform-specific socket flags inheritance.
if getdefaulttimeout() is None and self.gettimeout():
sock.setblocking(True)
return sock, addr
def makefile(self, mode="r", buffering=None, *,
encoding=None, errors=None, newline=None):
"""makefile(...) -> an I/O stream connected to the socket
The arguments are as for io.open() after the filename,
except the only mode characters supported are 'r', 'w' and 'b'.
The semantics are similar too. (XXX refactor to share code?)
"""
for c in mode:
if c not in {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)")
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
def _decref_socketios(self):
if self._io_refs > 0:
self._io_refs -= 1
if self._closed:
self.close()
def _real_close(self, _ss=_socket.socket):
# This function should not reference any globals. See issue #808164.
_ss.close(self)
def close(self):
# This function should not reference any globals. See issue #808164.
self._closed = True
if self._io_refs <= 0:
self._real_close()
def detach(self):
"""detach() -> file descriptor
Close the socket object without closing the underlying file descriptor.
The object cannot be used after this call, but the file descriptor
can be reused for other purposes. The file descriptor is returned.
"""
self._closed = True
return super().detach()
def fromfd(fd, family, type, proto=0):
""" fromfd(fd, family, type[, proto]) -> socket object
Create a socket object from a duplicate of the given file
descriptor. The remaining arguments are the same as for socket().
"""
nfd = dup(fd)
return socket(family, type, proto, nfd)
if hasattr(_socket.socket, "share"):
def fromshare(info):
""" fromshare(info) -> socket object
        Create a socket object from the bytes object returned by
socket.share(pid).
"""
return socket(0, 0, 0, info)
if hasattr(_socket, "socketpair"):
def socketpair(family=None, type=SOCK_STREAM, proto=0):
"""socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is
AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
"""
if family is None:
try:
family = AF_UNIX
except NameError:
family = AF_INET
a, b = _socket.socketpair(family, type, proto)
a = socket(family, type, proto, a.detach())
b = socket(family, type, proto, b.detach())
return a, b
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
"""Raw I/O implementation for stream sockets.
This class supports the makefile() method on sockets. It provides
the raw I/O interface on top of a socket object.
"""
# One might wonder why not let FileIO do the job instead. There are two
# main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't use read() and
# write() on a socket handle)
# - it wouldn't work with socket timeouts (FileIO would ignore the
# timeout and consider the socket non-blocking)
# XXX More docs
def __init__(self, sock, mode):
if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
raise ValueError("invalid mode: %r" % mode)
io.RawIOBase.__init__(self)
self._sock = sock
if "b" not in mode:
mode += "b"
self._mode = mode
self._reading = "r" in mode
self._writing = "w" in mode
self._timeout_occurred = False
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
If *b* is non-empty, a 0 return value indicates that the connection
was shutdown at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise IOError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
except InterruptedError:
continue
except error as e:
if e.args[0] in _blocking_errnos:
return None
raise
def write(self, b):
"""Write the given bytes or bytearray object *b* to the socket
and return the number of bytes written. This can be less than
len(b) if not all data could be written. If the socket is
non-blocking and no bytes could be written None is returned.
"""
self._checkClosed()
self._checkWritable()
try:
return self._sock.send(b)
except error as e:
# XXX what about EINTR?
if e.args[0] in _blocking_errnos:
return None
raise
def readable(self):
"""True if the SocketIO is open for reading.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._reading
def writable(self):
"""True if the SocketIO is open for writing.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._writing
def seekable(self):
"""True if the SocketIO is open for seeking.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return super().seekable()
def fileno(self):
"""Return the file descriptor of the underlying socket.
"""
self._checkClosed()
return self._sock.fileno()
@property
def name(self):
if not self.closed:
return self.fileno()
else:
return -1
@property
def mode(self):
return self._mode
def close(self):
"""Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
"""
if self.closed:
return
io.RawIOBase.close(self)
self._sock._decref_socketios()
self._sock = None
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
| gpl-2.0 |
mchdks/python-social-auth | social/backends/professionali.py | 73 | 1907 | # -*- coding: utf-8 -*-
"""
Professionali OAuth 2.0 support.
This contribution adds support for professionali.ru OAuth 2.0.
Username is retrieved from the identity returned by the server.
"""
from time import time
from social.utils import parse_qs
from social.backends.oauth import BaseOAuth2
class ProfessionaliOAuth2(BaseOAuth2):
name = 'professionali'
ID_KEY = 'user_id'
AUTHORIZATION_URL = 'https://api.professionali.ru/oauth/authorize.html'
ACCESS_TOKEN_URL = 'https://api.professionali.ru/oauth/getToken.json'
ACCESS_TOKEN_METHOD = 'POST'
EXTRA_DATA = [
('avatar_big', 'avatar_big'),
('link', 'link')
]
def get_user_details(self, response):
first_name, last_name = map(response.get, ('firstname', 'lastname'))
email = ''
if self.setting('FAKE_EMAIL'):
email = '{0}@professionali.ru'.format(time())
return {
'username': '{0}_{1}'.format(last_name, first_name),
'first_name': first_name,
'last_name': last_name,
'email': email
}
def user_data(self, access_token, response, *args, **kwargs):
url = 'https://api.professionali.ru/v6/users/get.json'
fields = list(set(['firstname', 'lastname', 'avatar_big', 'link'] +
self.setting('EXTRA_DATA', [])))
params = {
'fields': ','.join(fields),
'access_token': access_token,
'ids[]': response['user_id']
}
try:
return self.get_json(url, params)[0]
except (TypeError, KeyError, IOError, ValueError, IndexError):
return None
def get_json(self, url, *args, **kwargs):
return self.request(url, verify=False, *args, **kwargs).json()
def get_querystring(self, url, *args, **kwargs):
return parse_qs(self.request(url, verify=False, *args, **kwargs).text)
| bsd-3-clause |
marcelovilaca/DIRAC | DataManagementSystem/Utilities/lfc_dfc_copy.py | 14 | 8617 | #########################################################################################
#
# Script to populate the DIRAC FileCatalog with the information from the LFC
# FileCatalog using multiple LFC sources
#
# Author: A.Tsaregorodtsev
# Last Modified: 9.01.2012
#
#########################################################################################
from DIRAC.Core.Base import Script
Script.parseCommandLine()
import DIRAC.Resources.Catalog.LcgFileCatalogClient as LcgFileCatalogClient
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN, getGroupsWithVOMSAttribute
from DIRAC.Core.Utilities.ThreadPool import ThreadPool, ThreadedJob
from DIRAC.Core.Utilities.ProcessPool import ProcessPool
from DIRAC import gConfig, S_OK, S_ERROR
from multiprocessing import Queue, Process, Value, Manager
import time, sys, random
dirCount = 0
fileCount = 0
globalStart = time.time()
dnCache = {}
roleCache = {}
def writer( filename, writerQueue, stopFlag ):
print "entering writer"
outputFile = open( filename, 'w' )
while not stopFlag.value or not writerQueue.empty():
outputFile.write( writerQueue.get() )
outputFile.close()
print "exciting writer stopValue %s" % stopFlag.value
def getUserNameAndGroup(info):
""" Get the user name and group from the DN and VOMS role
"""
global dnCache, roleCache
owner = {}
if not "OwnerDN" in info:
return owner
username = dnCache.get(info.get('OwnerDN'))
if not username:
result = getUsernameForDN(info.get('OwnerDN','Unknown'))
if result['OK']:
username = result['Value']
dnCache[info['OwnerDN']] = username
elif "No username" in result['Message']:
username = 'Unknown'
dnCache[info['OwnerDN']] = username
if username and username != 'Unknown':
groups = roleCache.get('/'+info.get('OwnerRole'))
if not groups:
groups = getGroupsWithVOMSAttribute('/'+info['OwnerRole'])
roleCache['/'+info['OwnerRole']] = groups
if groups:
owner['username'] = username
owner['group'] = groups[0]
return owner
def processDir( initPath, writerQueue, recursive = False, host = None, fcInit = None, dfcInit = None ):
""" Process one directory, possibly recursively
"""
global globalStart, dnCache, roleCache
#print "AT >>> processDir initPath", initPath
fc = fcInit
if not fc:
fc = LcgFileCatalogClient.LcgFileCatalogClient( host=host )
dfc = dfcInit
if not dfc:
dfc = FileCatalogClient()
start = time.time()
resultList = fc.listDirectory(initPath,True)
if not resultList['OK']:
result = S_ERROR("Failed LFC lookup for %s" % initPath)
result['Path'] = initPath
return result
lfc_time = (time.time() - start)
s = time.time()
if resultList['OK']:
# Add directories
if resultList['Value']['Failed']:
result = S_ERROR( "Path %s failed: %s" % ( initPath, resultList['Value']['Failed'][initPath] ) )
return result
dirDict = resultList['Value']['Successful'][initPath]['SubDirs']
paths = {}
for path,info in dirDict.items():
paths[path] = {}
paths[path]['Mode'] = info['Mode']
owner = getUserNameAndGroup( info )
if owner:
paths[path]['Owner'] = owner
p_dirs = time.time() - s
s = time.time()
nDir = len(paths)
if nDir:
# print "Adding %d directories in %s" % ( nDir, initPath )
result = dfc.createDirectory(paths)
if not result['OK']:
print "Error adding directories:%s" % result['Message']
e_dirs = time.time() - s
# Add files
s = time.time()
fileDict = resultList['Value']['Successful'][initPath]['Files']
lfns = {}
for lfn,info in fileDict.items():
#print info['MetaData']
lfns[lfn] = {}
lfns[lfn]['Size'] = info['MetaData']['Size']
lfns[lfn]['Checksum'] = info['MetaData'].get('Checksum','')
lfns[lfn]['GUID'] = info['MetaData']['GUID']
lfns[lfn]['Mode'] = info['MetaData']['Mode']
lfns[lfn]['PFN'] = ''
owner = getUserNameAndGroup( info['MetaData'] )
if owner:
lfns[lfn]['Owner'] = owner
if info['Replicas']:
seList = info['Replicas'].keys()
lfns[lfn]['SE'] = seList
p_files = time.time() - s
s = time.time()
nFile = len(lfns)
nRep = 0
if nFile:
for lfn in lfns:
if 'SE' in lfns[lfn]:
nRep += len(lfns[lfn]['SE'])
# print "Adding %d files in %s" % ( nFile, initPath )
done = False
count = 0
error = False
while not done:
count += 1
result = dfc.addFile(lfns)
if not result['OK']:
print "Error adding files %d:" % count, result['Message']
if count > 10:
print "Completely failed path %s" % initPath
break
error = True
time.sleep(2)
elif error:
print "Successfully added files on retry %d" % count
done = True
else:
done = True
e_files = time.time() - s
dfc_time = time.time() - start - lfc_time
total_time = time.time() - globalStart
format = "== %s: time lfc/dfc %.2f/%.2f, files %d, dirs %d, reps %d, time: %.2f/%.2f/%.2f/%.2f %.2f \n"
writerQueue.put( format % ( initPath, lfc_time, dfc_time, nFile, nDir, nRep, p_dirs, e_dirs, p_files, e_files, total_time ) )
# outputFile = open('lfc_dfc.out','a')
# outputFile.write( format % (initPath,lfc_time,dfc_time,nFile,nDir,nRep,p_dirs,e_dirs,p_files,e_files,total_time) )
# outputFile.close()
# print format % (initPath,lfc_time,dfc_time,nFile,fileCount,nDir,dirCount,p_dirs,e_dirs,p_files,e_files,total_time)
# Go into directories
if recursive:
for path in paths:
result = processDir( path , writerQueue, recursive = True, host = host, fcInit = fc, dfcInit = dfc )
if result['OK']:
nFile += result['Value'].get('NumberOfFiles',0)
nDir += result['Value'].get('NumberOfDirectories',0)
nRep += result['Value'].get('NumberOfReplicas',0)
resultDict = {}
resultDict['NumberOfFiles'] = nFile
resultDict['NumberOfDirectories'] = nDir
resultDict['NumberOfReplicas'] = nRep
resultDict['Path'] = initPath
resultDict['Directories'] = dirDict.keys()
#print "AT >>> processDir",initPath,"done %.2f" % (time.time()-start)
toRet = S_OK( resultDict )
toRet['writerQueue'] = writerQueue
return toRet
def finalizeDirectory(task,result):
global lfcHosts, pPool
if result['OK']:
writerQueue = result['writerQueue']
print "Finished directory %(Path)s, dirs: %(NumberOfDirectories)s, files: %(NumberOfFiles)s, replicas: %(NumberOfReplicas)s" % result['Value']
print "%d active tasks remaining" % pPool.getNumWorkingProcesses()
if "Directories" in result['Value']:
for path in result['Value']['Directories']:
random.shuffle(lfcHosts)
#print pPool.getNumWorkingProcesses(), pPool.hasPendingTasks()
print "Queueing task for directory %s, lfc %s" % ( path, lfcHosts[0] )
result = pPool.createAndQueueTask( processDir, [path , writerQueue, False, lfcHosts[0]], callback = finalizeDirectory )
if not result['OK']:
print "Failed queueing %s" % path
else:
print "Task failed: %s" % result['Message']
if 'Path' in result:
random.shuffle(lfcHosts)
print "Requeueing task for directory %s, lfc %s" % ( result['Path'], lfcHosts[0] )
#########################################################################
pPool = ProcessPool(30,40,0)
manager = Manager()
writerQueue = manager.Queue()
stopFlag = Value( 'i', 0 )
#pPool.daemonize()
# lfcHosts = ['lfc-lhcb-ro.cern.ch',
# 'lfc-lhcb-ro.cr.cnaf.infn.it',
# 'lhcb-lfc-fzk.gridka.de',
# 'lfc-lhcb-ro.in2p3.fr',
# 'lfc-lhcb.grid.sara.nl',
# 'lfclhcb.pic.es',
# 'lhcb-lfc.gridpp.rl.ac.uk']
lfcHosts = ['prod-lfc-lhcb-ro.cern.ch']
# path = "/lhcb/LHCb"
path = '/lhcb/user/c/chaen'
print "Queueing task for directory", path, lfcHosts[0]
writerProc = Process( target = writer, args = ( 'lfc_dfc.out', writerQueue, stopFlag ) )
writerProc.start()
result = pPool.createAndQueueTask( processDir, [path , writerQueue, False, lfcHosts[0]], callback = finalizeDirectory )
if not result['OK']:
print "Failed queueing", path
for i in range(20):
pPool.processResults()
time.sleep(1)
pPool.processAllResults( timeout = 300 )
stopFlag.value = 1
writerQueue.put( "Exit" )
writerProc.join()
| gpl-3.0 |
PGower/PyCanvas | pycanvas/apis/course_audit_log.py | 1 | 11942 | """CourseAuditLog API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from base import BaseCanvasAPI
from base import BaseModel
class CourseAuditLogAPI(BaseCanvasAPI):
"""CourseAuditLog API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for CourseAuditLogAPI."""
super(CourseAuditLogAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("pycanvas.CourseAuditLogAPI")
def query_by_course(self, course_id, end_time=None, start_time=None):
"""
Query by course.
List course change events for a given course.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - start_time
"""The beginning of the time range from which you want events."""
if start_time is not None:
params["start_time"] = start_time
# OPTIONAL - end_time
"""The end of the time range from which you want events."""
if end_time is not None:
params["end_time"] = end_time
self.logger.debug("GET /api/v1/audit/course/courses/{course_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/audit/course/courses/{course_id}".format(**path), data=data, params=params, all_pages=True)
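    # A minimal usage sketch. The instance URL, token and course id are
    # placeholders, and it assumes BaseCanvasAPI is constructed with the
    # Canvas instance URL and an API token:
    #
    #   api = CourseAuditLogAPI('https://canvas.example.edu', 'API_TOKEN')
    #   events = api.query_by_course(course_id=42, start_time='2016-01-01T00:00:00Z')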
class Createdeventdata(BaseModel):
"""Createdeventdata Model.
The created event data object returns all the fields that were set in the format of the following example. If a field does not exist it was not set. The value of each field changed is in the format of [:old_value, :new_value]. The created event type also includes a created_source field to specify what triggered the creation of the course."""
def __init__(self, is_public=None, conclude_at=None, start_at=None, name=None, created_source=None):
"""Init method for Createdeventdata class."""
self._is_public = is_public
self._conclude_at = conclude_at
self._start_at = start_at
self._name = name
self._created_source = created_source
self.logger = logging.getLogger('pycanvas.Createdeventdata')
@property
def is_public(self):
"""is_public."""
return self._is_public
@is_public.setter
def is_public(self, value):
"""Setter for is_public property."""
self.logger.warn("Setting values on is_public will NOT update the remote Canvas instance.")
self._is_public = value
@property
def conclude_at(self):
"""conclude_at."""
return self._conclude_at
@conclude_at.setter
def conclude_at(self, value):
"""Setter for conclude_at property."""
self.logger.warn("Setting values on conclude_at will NOT update the remote Canvas instance.")
self._conclude_at = value
@property
def start_at(self):
"""start_at."""
return self._start_at
@start_at.setter
def start_at(self, value):
"""Setter for start_at property."""
self.logger.warn("Setting values on start_at will NOT update the remote Canvas instance.")
self._start_at = value
@property
def name(self):
"""name."""
return self._name
@name.setter
def name(self, value):
"""Setter for name property."""
self.logger.warn("Setting values on name will NOT update the remote Canvas instance.")
self._name = value
@property
def created_source(self):
"""The type of action that triggered the creation of the course."""
return self._created_source
@created_source.setter
def created_source(self, value):
"""Setter for created_source property."""
self.logger.warn("Setting values on created_source will NOT update the remote Canvas instance.")
self._created_source = value
class Courseevent(BaseModel):
"""Courseevent Model."""
def __init__(self, event_source=None, event_type=None, links=None, created_at=None, id=None, event_data=None):
"""Init method for Courseevent class."""
self._event_source = event_source
self._event_type = event_type
self._links = links
self._created_at = created_at
self._id = id
self._event_data = event_data
self.logger = logging.getLogger('pycanvas.Courseevent')
@property
def event_source(self):
"""Course event source depending on the event type. This will return a string containing the source of the event."""
return self._event_source
@event_source.setter
def event_source(self, value):
"""Setter for event_source property."""
self.logger.warn("Setting values on event_source will NOT update the remote Canvas instance.")
self._event_source = value
@property
def event_type(self):
"""Course event type The event type defines the type and schema of the event_data object."""
return self._event_type
@event_type.setter
def event_type(self, value):
"""Setter for event_type property."""
self.logger.warn("Setting values on event_type will NOT update the remote Canvas instance.")
self._event_type = value
@property
def links(self):
"""Jsonapi.org links."""
return self._links
@links.setter
def links(self, value):
"""Setter for links property."""
self.logger.warn("Setting values on links will NOT update the remote Canvas instance.")
self._links = value
@property
def created_at(self):
"""timestamp of the event."""
return self._created_at
@created_at.setter
def created_at(self, value):
"""Setter for created_at property."""
self.logger.warn("Setting values on created_at will NOT update the remote Canvas instance.")
self._created_at = value
@property
def id(self):
"""ID of the event."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def event_data(self):
"""Course event data depending on the event type. This will return an object containing the relevant event data. An updated event type will return an UpdatedEventData object."""
return self._event_data
@event_data.setter
def event_data(self, value):
"""Setter for event_data property."""
self.logger.warn("Setting values on event_data will NOT update the remote Canvas instance.")
self._event_data = value
class Courseeventlink(BaseModel):
"""Courseeventlink Model."""
def __init__(self, copied_from=None, course=None, sis_batch=None, user=None, copied_to=None, page_view=None):
"""Init method for Courseeventlink class."""
self._copied_from = copied_from
self._course = course
self._sis_batch = sis_batch
self._user = user
self._copied_to = copied_to
self._page_view = page_view
self.logger = logging.getLogger('pycanvas.Courseeventlink')
@property
def copied_from(self):
"""ID of the course that this course was copied from. This is only included if the event_type is copied_from."""
return self._copied_from
@copied_from.setter
def copied_from(self, value):
"""Setter for copied_from property."""
self.logger.warn("Setting values on copied_from will NOT update the remote Canvas instance.")
self._copied_from = value
@property
def course(self):
"""ID of the course for the event."""
return self._course
@course.setter
def course(self, value):
"""Setter for course property."""
self.logger.warn("Setting values on course will NOT update the remote Canvas instance.")
self._course = value
@property
def sis_batch(self):
"""ID of the SIS batch that triggered the event."""
return self._sis_batch
@sis_batch.setter
def sis_batch(self, value):
"""Setter for sis_batch property."""
self.logger.warn("Setting values on sis_batch will NOT update the remote Canvas instance.")
self._sis_batch = value
@property
def user(self):
"""ID of the user for the event (who made the change)."""
return self._user
@user.setter
def user(self, value):
"""Setter for user property."""
self.logger.warn("Setting values on user will NOT update the remote Canvas instance.")
self._user = value
@property
def copied_to(self):
"""ID of the course that this course was copied to. This is only included if the event_type is copied_to."""
return self._copied_to
@copied_to.setter
def copied_to(self, value):
"""Setter for copied_to property."""
self.logger.warn("Setting values on copied_to will NOT update the remote Canvas instance.")
self._copied_to = value
@property
def page_view(self):
"""ID of the page view during the event if it exists."""
return self._page_view
@page_view.setter
def page_view(self, value):
"""Setter for page_view property."""
self.logger.warn("Setting values on page_view will NOT update the remote Canvas instance.")
self._page_view = value
class Updatedeventdata(BaseModel):
"""Updatedeventdata Model.
The updated event data object returns all the fields that have changed in the format of the following example. If a field does not exist it was not changed. The value is an array that contains the before and after values for the change as in [:old_value, :new_value]."""
def __init__(self, is_public=None, conclude_at=None, start_at=None, name=None):
"""Init method for Updatedeventdata class."""
self._is_public = is_public
self._conclude_at = conclude_at
self._start_at = start_at
self._name = name
self.logger = logging.getLogger('pycanvas.Updatedeventdata')
@property
def is_public(self):
"""is_public."""
return self._is_public
@is_public.setter
def is_public(self, value):
"""Setter for is_public property."""
self.logger.warn("Setting values on is_public will NOT update the remote Canvas instance.")
self._is_public = value
@property
def conclude_at(self):
"""conclude_at."""
return self._conclude_at
@conclude_at.setter
def conclude_at(self, value):
"""Setter for conclude_at property."""
self.logger.warn("Setting values on conclude_at will NOT update the remote Canvas instance.")
self._conclude_at = value
@property
def start_at(self):
"""start_at."""
return self._start_at
@start_at.setter
def start_at(self, value):
"""Setter for start_at property."""
self.logger.warn("Setting values on start_at will NOT update the remote Canvas instance.")
self._start_at = value
@property
def name(self):
"""name."""
return self._name
@name.setter
def name(self, value):
"""Setter for name property."""
self.logger.warn("Setting values on name will NOT update the remote Canvas instance.")
self._name = value
| mit |
cpennington/edx-platform | common/lib/xmodule/xmodule/tests/test_services.py | 3 | 6030 | """
Tests for SettingsService
"""
import unittest
from django.test import TestCase
import ddt
import mock
from config_models.models import ConfigurationModel
from django.conf import settings
from django.test.utils import override_settings
from xblock.runtime import Mixologist
from opaque_keys.edx.locator import CourseLocator
from xmodule.services import ConfigurationService, SettingsService, TeamsConfigurationService
from openedx.core.lib.teams_config import TeamsConfig
class _DummyBlock(object):
""" Dummy Xblock class """
pass
class DummyConfig(ConfigurationModel):
"""
Dummy Configuration
"""
class Meta:
app_label = 'xmoduletestservices'
class DummyUnexpected(object):
"""
Dummy Unexpected Class
"""
pass
@ddt.ddt
class TestSettingsService(unittest.TestCase):
""" Test SettingsService """
xblock_setting_key1 = 'dummy_block'
xblock_setting_key2 = 'other_dummy_block'
def setUp(self):
""" Setting up tests """
super(TestSettingsService, self).setUp()
self.settings_service = SettingsService()
self.xblock_mock = mock.Mock()
self.xblock_mock.block_settings_key = self.xblock_setting_key1
self.xblock_mock.unmixed_class = mock.Mock()
self.xblock_mock.unmixed_class.__name__ = self.xblock_setting_key2
def test_get_given_none_throws_value_error(self):
""" Test that given None throws value error """
with self.assertRaises(ValueError):
self.settings_service.get_settings_bucket(None)
def test_get_return_default_if_xblock_settings_is_missing(self):
""" Test that returns default (or None if default not set) if XBLOCK_SETTINGS is not set """
self.assertFalse(hasattr(settings, 'XBLOCK_SETTINGS')) # precondition check
self.assertEqual(self.settings_service.get_settings_bucket(self.xblock_mock, 'zzz'), 'zzz')
def test_get_return_empty_dictionary_if_xblock_settings_and_default_is_missing(self):
""" Test that returns default (or None if default not set) if XBLOCK_SETTINGS is not set """
self.assertFalse(hasattr(settings, 'XBLOCK_SETTINGS')) # precondition check
self.assertEqual(self.settings_service.get_settings_bucket(self.xblock_mock), {})
@override_settings(XBLOCK_SETTINGS={xblock_setting_key2: {'b': 1}})
def test_get_returns_none_or_default_if_bucket_not_found(self):
""" Test if settings service returns default if setting not found """
self.assertEqual(settings.XBLOCK_SETTINGS, {self.xblock_setting_key2: {'b': 1}})
self.assertEqual(self.settings_service.get_settings_bucket(self.xblock_mock), {})
self.assertEqual(self.settings_service.get_settings_bucket(self.xblock_mock, 123), 123)
@override_settings(XBLOCK_SETTINGS={xblock_setting_key1: 42})
def test_get_returns_correct_value(self):
""" Test if settings service returns correct bucket """
self.assertEqual(settings.XBLOCK_SETTINGS, {self.xblock_setting_key1: 42})
self.assertEqual(self.settings_service.get_settings_bucket(self.xblock_mock), 42)
@override_settings(XBLOCK_SETTINGS={xblock_setting_key2: "I'm a setting"})
def test_get_respects_block_settings_key(self):
""" Test if settings service respects block_settings_key value """
self.assertEqual(settings.XBLOCK_SETTINGS, {self.xblock_setting_key2: "I'm a setting"})
self.xblock_mock.block_settings_key = self.xblock_setting_key2
self.assertEqual(self.settings_service.get_settings_bucket(self.xblock_mock), "I'm a setting")
@override_settings(XBLOCK_SETTINGS={_DummyBlock.__name__: [1, 2, 3]})
def test_get_uses_class_name_if_block_settings_key_is_not_set(self):
""" Test if settings service uses class name if block_settings_key attribute does not exist """
mixologist = Mixologist([])
block = mixologist.mix(_DummyBlock)
self.assertEqual(settings.XBLOCK_SETTINGS, {"_DummyBlock": [1, 2, 3]})
self.assertEqual(self.settings_service.get_settings_bucket(block), [1, 2, 3])
class TestConfigurationService(unittest.TestCase):
"""
Tests for ConfigurationService
"""
def test_given_unexpected_class_throws_value_error(self):
"""
Test that instantiating ConfigurationService raises exception on passing
a class which is not subclass of ConfigurationModel.
"""
with self.assertRaises(ValueError):
ConfigurationService(DummyUnexpected)
def test_configuration_service(self):
"""
Test the correct configuration on instantiating ConfigurationService.
"""
config_service = ConfigurationService(DummyConfig)
self.assertEqual(config_service.configuration, DummyConfig)
class MockConfigurationService(TeamsConfigurationService):
"""
Mock ConfigurationService for testing.
"""
def __init__(self, course, **kwargs):
super(MockConfigurationService, self).__init__()
self._course = course
def get_course(self, course_id):
return self._course
class ConfigurationServiceBaseClass(TestCase):
"""
Base test class for testing the ConfigurationService.
"""
def setUp(self):
super(ConfigurationServiceBaseClass, self).setUp()
self.teams_config = TeamsConfig(
{'max_size': 2, 'topics': [{'id': 'topic', 'name': 'Topic', 'description': 'A Topic'}]}
)
self.course = mock.Mock(
id=CourseLocator('org_0', 'course_0', 'run_0'),
teams_configuration=self.teams_config
)
self.configuration_service = MockConfigurationService(self.course)
class TestTeamsConfigurationService(ConfigurationServiceBaseClass):
"""
Test operations of the teams configuration service
"""
def test_get_teamsconfiguration(self):
teams_config = self.configuration_service.get_teams_configuration(self.course.id)
self.assertEqual(teams_config, self.teams_config)
| agpl-3.0 |
Senseg/Py4A | python3-alpha/extra_modules/gdata/Crypto/Protocol/Chaffing.py | 44 | 9496 | """This file implements the chaffing algorithm.
Winnowing and chaffing is a technique for enhancing privacy without requiring
strong encryption. In short, the technique takes a set of authenticated
message blocks (the wheat) and adds a number of chaff blocks which have
randomly chosen data and MAC fields. This means that to an adversary, the
chaff blocks look as valid as the wheat blocks, and so the authentication
would have to be performed on every block. By tailoring the number of chaff
blocks added to the message, the sender can make breaking the message
computationally infeasible. There are many other interesting properties of
the winnow/chaff technique.
For example, say Alice is sending a message to Bob. She packetizes the
message and performs an all-or-nothing transformation on the packets. Then
she authenticates each packet with a message authentication code (MAC). The
MAC is a hash of the data packet, and there is a secret key which she must
share with Bob (key distribution is an exercise left to the reader). She then
adds a serial number to each packet, and sends the packets to Bob.
Bob receives the packets, and using the shared secret authentication key,
authenticates the MACs for each packet. Those packets that have bad MACs are
simply discarded. The remainder are sorted by serial number, and passed
through the reverse all-or-nothing transform. The transform means that an
eavesdropper (say Eve) must acquire all the packets before any of the data can
be read. If even one packet is missing, the data is useless.
There's one twist: by adding chaff packets, Alice and Bob can make Eve's job
much harder, since Eve now has to break the shared secret key, or try every
combination of wheat and chaff packet to read any of the message. The cool
thing is that Bob doesn't need to add any additional code; the chaff packets
are already filtered out because their MACs don't match (in all likelihood --
since the data and MACs for the chaff packets are randomly chosen it is
possible, but very unlikely that a chaff MAC will match the chaff data). And
Alice need not even be the party adding the chaff! She could be completely
unaware that a third party, say Charles, is adding chaff packets to her
messages as they are transmitted.
For more information on winnowing and chaffing see this paper:
Ronald L. Rivest, "Chaffing and Winnowing: Confidentiality without Encryption"
http://theory.lcs.mit.edu/~rivest/chaffing.txt
"""
__revision__ = "$Id: Chaffing.py,v 1.7 2003/02/28 15:23:21 akuchling Exp $"
from Crypto.Util.number import bytes_to_long
class Chaff:
"""Class implementing the chaff adding algorithm.
Methods for subclasses:
_randnum(size):
Returns a randomly generated number with a byte-length equal
to size. Subclasses can use this to implement better random
data and MAC generating algorithms. The default algorithm is
probably not very cryptographically secure. It is most
important that the chaff data does not contain any patterns
that can be used to discern it from wheat data without running
the MAC.
"""
def __init__(self, factor=1.0, blocksper=1):
"""Chaff(factor:float, blocksper:int)
factor is the number of message blocks to add chaff to,
expressed as a percentage between 0.0 and 1.0. blocksper is
the number of chaff blocks to include for each block being
chaffed. Thus the defaults add one chaff block to every
message block. By changing the defaults, you can adjust how
computationally difficult it could be for an adversary to
brute-force crack the message. The difficulty is expressed
as:
pow(blocksper, int(factor * number-of-blocks))
For ease of implementation, when factor < 1.0, only the first
int(factor*number-of-blocks) message blocks are chaffed.
"""
if not (0.0<=factor<=1.0):
raise ValueError("'factor' must be between 0.0 and 1.0")
if blocksper < 0:
raise ValueError("'blocksper' must be zero or more")
self.__factor = factor
self.__blocksper = blocksper
def chaff(self, blocks):
"""chaff( [(serial-number:int, data:string, MAC:string)] )
: [(int, string, string)]
Add chaff to message blocks. blocks is a list of 3-tuples of the
form (serial-number, data, MAC).
Chaff is created by choosing a random number of the same
byte-length as data, and another random number of the same
byte-length as MAC. The message block's serial number is
placed on the chaff block and all the packet's chaff blocks
are randomly interspersed with the single wheat block. This
method then returns a list of 3-tuples of the same form.
Chaffed blocks will contain multiple instances of 3-tuples
with the same serial number, but the only way to figure out
which blocks are wheat and which are chaff is to perform the
MAC hash and compare values.
"""
chaffedblocks = []
# count is the number of blocks to add chaff to. blocksper is the
# number of chaff blocks to add per message block that is being
# chaffed.
count = len(blocks) * self.__factor
blocksper = list(range(self.__blocksper))
        for i, wheat in enumerate(blocks):  # enumerate replaces the Python 2 map(None, ...) idiom, which fails on Python 3
# it shouldn't matter which of the n blocks we add chaff to, so for
# ease of implementation, we'll just add them to the first count
# blocks
if i < count:
serial, data, mac = wheat
datasize = len(data)
macsize = len(mac)
addwheat = 1
# add chaff to this block
for j in blocksper:
import sys
chaffdata = self._randnum(datasize)
chaffmac = self._randnum(macsize)
chaff = (serial, chaffdata, chaffmac)
# mix up the order, if the 5th bit is on then put the
# wheat on the list
if addwheat and bytes_to_long(self._randnum(16)) & 0x40:
chaffedblocks.append(wheat)
addwheat = 0
chaffedblocks.append(chaff)
if addwheat:
chaffedblocks.append(wheat)
else:
# just add the wheat
chaffedblocks.append(wheat)
return chaffedblocks
def _randnum(self, size):
# TBD: Not a very secure algorithm.
# TBD: size * 2 to work around possible bug in RandomPool
from Crypto.Util import randpool
import time
pool = randpool.RandomPool(size * 2)
while size > pool.entropy:
pass
# we now have enough entropy in the pool to get size bytes of random
# data... well, probably
return pool.get_bytes(size)
if __name__ == '__main__':
text = """\
We hold these truths to be self-evident, that all men are created equal, that
they are endowed by their Creator with certain unalienable Rights, that among
these are Life, Liberty, and the pursuit of Happiness. That to secure these
rights, Governments are instituted among Men, deriving their just powers from
the consent of the governed. That whenever any Form of Government becomes
destructive of these ends, it is the Right of the People to alter or to
abolish it, and to institute new Government, laying its foundation on such
principles and organizing its powers in such form, as to them shall seem most
likely to effect their Safety and Happiness.
"""
print('Original text:\n==========')
print(text)
print('==========')
# first transform the text into packets
blocks = [] ; size = 40
for i in range(0, len(text), size):
blocks.append( text[i:i+size] )
# now get MACs for all the text blocks. The key is obvious...
print('Calculating MACs...')
from Crypto.Hash import HMAC, SHA
key = 'Jefferson'
macs = [HMAC.new(key, block, digestmod=SHA).digest()
for block in blocks]
assert len(blocks) == len(macs)
# put these into a form acceptable as input to the chaffing procedure
source = []
    m = list(zip(range(len(blocks)), blocks, macs))  # zip replaces the Python 2 map(None, ...) idiom, which fails on Python 3
    print(m)
    for i, data, mac in m:
source.append((i, data, mac))
# now chaff these
print('Adding chaff...')
c = Chaff(factor=0.5, blocksper=2)
chaffed = c.chaff(source)
from base64 import encodestring
# print the chaffed message blocks. meanwhile, separate the wheat from
# the chaff
wheat = []
print('chaffed message blocks:')
for i, data, mac in chaffed:
# do the authentication
h = HMAC.new(key, data, digestmod=SHA)
pmac = h.digest()
if pmac == mac:
tag = '-->'
wheat.append(data)
else:
tag = ' '
# base64 adds a trailing newline
print(tag, '%3d' % i, \
repr(data), encodestring(mac)[:-1])
# now decode the message packets and check it against the original text
print('Undigesting wheat...')
newtext = "".join(wheat)
if newtext == text:
print('They match!')
else:
print('They differ!')
| apache-2.0 |
dtudares/hello-world | yardstick/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py | 1 | 2042 | #!/usr/bin/env python
##############################################################################
# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for yardstick.benchmark.scenarios.networking.networkcapacity.NetworkCapacity
import mock
import unittest
import os
import json
from yardstick.benchmark.scenarios.networking import networkcapacity
SAMPLE_OUTPUT = '{"Number of connections":"308","Number of frames received": "166503"}'
@mock.patch('yardstick.benchmark.scenarios.networking.networkcapacity.ssh')
class NetworkCapacityTestCase(unittest.TestCase):
def setUp(self):
self.ctx = {
'host': {
'ip': '172.16.0.137',
'user': 'cirros',
'password': "root"
},
}
self.result = {}
def test_capacity_successful_setup(self, mock_ssh):
c = networkcapacity.NetworkCapacity({}, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
c.setup()
self.assertIsNotNone(c.client)
self.assertTrue(c.setup_done)
def test_capacity_successful(self, mock_ssh):
c = networkcapacity.NetworkCapacity({}, self.ctx)
mock_ssh.SSH().execute.return_value = (0, SAMPLE_OUTPUT, '')
c.run(self.result)
expected_result = json.loads(SAMPLE_OUTPUT)
self.assertEqual(self.result, expected_result)
def test_capacity_unsuccessful_script_error(self, mock_ssh):
c = networkcapacity.NetworkCapacity({}, self.ctx)
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, c.run, self.result)
| apache-2.0 |
chand3040/sree_odoo | openerp/addons/hw_posbox_homepage/__init__.py | 1894 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tux-00/ansible | lib/ansible/modules/network/f5/bigip_gtm_wide_ip.py | 49 | 5381 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Michael Perzel
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_gtm_wide_ip
short_description: "Manages F5 BIG-IP GTM wide ip"
description:
- "Manages F5 BIG-IP GTM wide ip"
version_added: "2.0"
author:
- Michael Perzel (@perzizzle)
- Tim Rupp (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11.4"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Tested with manager and above account privilege level"
requirements:
- bigsuds
options:
lb_method:
description:
- LB method of wide ip
required: true
choices: ['return_to_dns', 'null', 'round_robin',
'ratio', 'topology', 'static_persist', 'global_availability',
'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
wide_ip:
description:
- Wide IP name
required: true
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Set lb method
local_action: >
bigip_gtm_wide_ip
server=192.0.2.1
user=admin
password=mysecret
lb_method=round_robin
wide_ip=my-wide-ip.example.com
'''
try:
import bigsuds
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.f5_utils import bigip_api, f5_argument_spec
def get_wide_ip_lb_method(api, wide_ip):
lb_method = api.GlobalLB.WideIP.get_lb_method(wide_ips=[wide_ip])[0]
lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
return lb_method
def get_wide_ip_pools(api, wide_ip):
try:
return api.GlobalLB.WideIP.get_wideip_pool([wide_ip])
except Exception:
e = get_exception()
print(e)
def wide_ip_exists(api, wide_ip):
# hack to determine if wide_ip exists
result = False
try:
api.GlobalLB.WideIP.get_object_status(wide_ips=[wide_ip])
result = True
except bigsuds.OperationFailed:
e = get_exception()
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def set_wide_ip_lb_method(api, wide_ip, lb_method):
lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
api.GlobalLB.WideIP.set_lb_method(wide_ips=[wide_ip], lb_methods=[lb_method])
def main():
argument_spec = f5_argument_spec()
lb_method_choices = ['return_to_dns', 'null', 'round_robin',
'ratio', 'topology', 'static_persist', 'global_availability',
'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
meta_args = dict(
lb_method = dict(type='str', required=True, choices=lb_method_choices),
wide_ip = dict(type='str', required=True)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
wide_ip = module.params['wide_ip']
lb_method = module.params['lb_method']
validate_certs = module.params['validate_certs']
result = {'changed': False} # default
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
if not wide_ip_exists(api, wide_ip):
module.fail_json(msg="wide ip %s does not exist" % wide_ip)
if lb_method is not None and lb_method != get_wide_ip_lb_method(api, wide_ip):
if not module.check_mode:
set_wide_ip_lb_method(api, wide_ip, lb_method)
result = {'changed': True}
else:
result = {'changed': True}
except Exception:
e = get_exception()
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
catapult-project/catapult | third_party/gsutil/gslib/tests/mock_logging_handler.py | 5 | 1308 | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock logging handler to check for expected logs."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import logging
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
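# Hedged usage sketch (illustrative comment only, not part of gsutil's tests):
# attach the handler to a logger, emit a record, then assert on the captured
# text. The logger name 'example' is arbitrary.
#
#   handler = MockLoggingHandler()
#   logger = logging.getLogger('example')
#   logger.addHandler(handler)
#   logger.warning('disk is %d%% full', 90)
#   assert handler.messages['warning'] == ['disk is 90% full']
#   handler.reset()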
| bsd-3-clause |
dgaiero/cpe101-proj4 | funcsTests.py | 1 | 7252 | # Project 4 – Word Puzzle
#
# Name: Dominic Gaiero and Russell Caletena
# Instructor: S. Einakian
# Section: 05
# https://github.com/dgaiero/cpe101-proj4
# # ######
# # # #### ##### ##### # # # # ###### ###### # ######
# # # # # # # # # # # # # # # # #
# # # # # # # # # ###### # # # # # #####
# # # # # ##### # # # # # # # # #
# # # # # # # # # # # # # # # #
## ## #### # # ##### # #### ###### ###### ###### ######
#######
# ###### #### ##### # # # ####
# # # # # ## # # #
# ##### #### # # # # # #
# # # # # # # # # ###
# # # # # # # ## # #
# ###### #### # # # # ####
# ========================================================
# Import libraries
# ========================================================
import unittest
from funcs import *
class TestCases(unittest.TestCase):
def test_formatLetters(self):
self.assertEqual(formatLetters("WAQHGTTWEECBMIVQQELSAZXWKWIIILLDWLFXPIPVPONDTMVAMNOEDSOYQGOBLGQCKGMMCTYCSLOACUZMXVDMGSXCYZUUIUNIXFNU"), [
'WAQHGTTWEE', 'CBMIVQQELS', 'AZXWKWIIIL', 'LDWLFXPIPV', 'PONDTMVAMN', 'OEDSOYQGOB', 'LGQCKGMMCT', 'YCSLOACUZM', 'XVDMGSXCYZ', 'UUIUNIXFNU'])
self.assertEqual(formatLetters("EOARBRNIABZEBRAEBRBHARRACCOONRAACBRRCHECCNABOZOBKABONIRBBNCAEERTCBRAIAABCERICRHRBOIORORCCOBOAAKRKEAR"), [
'EOARBRNIAB', 'ZEBRAEBRBH', 'ARRACCOONR', 'AACBRRCHEC', 'CNABOZOBKA', 'BONIRBBNCA', 'EERTCBRAIA', 'ABCERICRHR', 'BOIORORCCO', 'BOAAKRKEAR'])
self.assertEqual(formatLetters("LLARSHAHLCAOOLLAMILLOIDNALHGIHRBAMCETUHSSMOSKAGETRCORCHORROAIDBSLSAAOMIGOSMONDFLHHNGMSDCMACMIRRSMLHP"), [
'LLARSHAHLC', 'AOOLLAMILL', 'OIDNALHGIH', 'RBAMCETUHS', 'SMOSKAGETR', 'CORCHORROA', 'IDBSLSAAOM', 'IGOSMONDFL', 'HHNGMSDCMA', 'CMIRRSMLHP'])
def test_makeRows(self):
self.assertEqual(makeRows(['WAQHGTTWEE', 'CBMIVQQELS', 'AZXWKWIIIL', 'LDWLFXPIPV', 'PONDTMVAMN', 'OEDSOYQGOB', 'LGQCKGMMCT', 'YCSLOACUZM', 'XVDMGSXCYZ', 'UUIUNIXFNU']), [
'WCALPOLYXU', 'ABZDOEGCVU', 'QMXWNDQSDI', 'HIWLDSCLMU', 'GVKFTOKOGN', 'TQWXMYGASI', 'TQIPVQMCXX', 'WEIIAGMUCF', 'ELIPMOCZYN', 'ESLVNBTMZU'])
self.assertEqual(makeRows(['EOARBRNIAB', 'ZEBRAEBRBH', 'ARRACCOONR', 'AACBRRCHEC', 'CNABOZOBKA', 'BONIRBBNCA', 'EERTCBRAIA', 'ABCERICRHR', 'BOIORORCCO', 'BOAAKRKEAR']),
['EZAACBEABB', 'OERANOEBOO', 'ABRCANRCIA', 'RRABBITEOA', 'BACRORCRRK', 'RECRZBBIOR', 'NBOCOBRCRK', 'IROHBNARCE', 'ABNEKCIHCA', 'BHRCAAAROR'])
self.assertEqual(makeRows(['LLARSHAHLC', 'AOOLLAMILL', 'OIDNALHGIH', 'RBAMCETUHS', 'SMOSKAGETR', 'CORCHORROA', 'IDBSLSAAOM', 'IGOSMONDFL', 'HHNGMSDCMA', 'CMIRRSMLHP']),
['LAORSCIIHC', 'LOIBMODGHM', 'AODAORBONI', 'RLNMSCSSGR', 'SLACKHLMMR', 'HALEAOSOSS', 'AMHTGRANDM', 'HIGUERADCL', 'LLIHTOOFMH', 'CLHSRAMLAP'])
def test_checkLetters(self):
self.assertEqual(checkLetters(['WAQHGTTWEE', 'CBMIVQQELS', 'AZXWKWIIIL', 'LDWLFXPIPV', 'PONDTMVAMN', 'OEDSOYQGOB', 'LGQCKGMMCT', 'YCSLOACUZM', 'XVDMGSXCYZ', 'UUIUNIXFNU'],
['UNIX', 'CALPOLY', 'GCC', 'SLO', 'COMPILE', 'VIM', 'TEST'],
1,
0,
[[],[]]),
[['SLO', 'UNIX'], [{'word': 'SLO', 'row': 7, 'col': 2, 'dir': 1, 'reverse': 0}, {'word': 'UNIX', 'row': 9, 'col': 3, 'dir': 1, 'reverse': 0}]])
self.assertEqual(checkLetters(['EOARBRNIAB', 'ZEBRAEBRBH', 'ARRACCOONR', 'AACBRRCHEC', 'CNABOZOBKA', 'BONIRBBNCA', 'EERTCBRAIA', 'ABCERICRHR', 'BOIORORCCO', 'BOAAKRKEAR'],
['CHICKEN', 'DOG', 'CAT', 'BEAR', 'RABBIT', 'ZEBRA', 'MOUSE', 'RACCOON'],
1,
0,
[[],[]]),
[['ZEBRA', 'RACCOON'], [{'word': 'ZEBRA', 'row': 1, 'col': 0, 'dir': 1, 'reverse': 0}, {'word': 'RACCOON', 'row': 2, 'col': 2, 'dir': 1, 'reverse': 0}]])
self.assertEqual(checkLetters(['WAQHGTTWEE', 'CBMIVQQELS', 'AZXWKWIIIL', 'LDWLFXPIPV', 'PONDTMVAMN', 'OEDSOYQGOB', 'LGQCKGMMCT', 'YCSLOACUZM', 'XVDMGSXCYZ', 'UUIUNIXFNU'],
['XNIU', 'YLOPLAC', 'CCG', 'OLS', 'ELIPMOC', 'MIV', 'TSET'],
1,
1,
[[],[]]),
[['VIM'], [{'word': 'VIM', 'row': 1, 'col': 4, 'dir': 1, 'reverse': 1}]])
self.assertEqual(checkLetters(['WCALPOLYXU', 'ABZDOEGCVU', 'QMXWNDQSDI', 'HIWLDSCLMU', 'GVKFTOKOGN', 'TQWXMYGASI', 'TQIPVQMCXX', 'WEIIAGMUCF', 'ELIPMOCZYN', 'ESLVNBTMZU'],
['UNIX', 'CALPOLY', 'GCC', 'SLO', 'COMPILE', 'VIM', 'TEST'],
2,
0,
[[],[]]),
[['CALPOLY'], [{'word': 'CALPOLY', 'row': 1, 'col': 0, 'dir': 2, 'reverse': 0}]])
self.assertEqual(checkLetters(['LAORSCIIHC', 'LOIBMODGHM', 'AODAORBONI', 'RLNMSCSSGR', 'SLACKHLMMR', 'HALEAOSOSS', 'AMHTGRANDM', 'HIGUERADCL', 'LLIHTOOFMH', 'CLHSRAMLAP'],
['SLACK', 'HIGH', 'HIGHLAND', 'CHORRO', 'PEACH', 'BROAD', 'GRAND', 'OSOS', 'MORRO', 'HIGUERA', 'MARSH', 'FOOTHILL', 'NIPOMO', 'MILL', 'PALM'],
2,
0,
[[],[]]),
[['SLACK', 'OSOS', 'GRAND', 'HIGUERA'], [ {'word': 'SLACK', 'row': 0, 'col': 4, 'dir': 2, 'reverse': 0}, {'word': 'OSOS', 'row': 5, 'col': 5, 'dir': 2, 'reverse': 0},
{'word': 'GRAND', 'row': 4, 'col': 6, 'dir': 2, 'reverse': 0}, {'word': 'HIGUERA', 'row': 0, 'col': 7, 'dir': 2, 'reverse': 0}]])
self.assertEqual(checkLetters(['WCALPOLYXU', 'ABZDOEGCVU', 'QMXWNDQSDI', 'HIWLDSCLMU', 'GVKFTOKOGN', 'TQWXMYGASI', 'TQIPVQMCXX', 'WEIIAGMUCF', 'ELIPMOCZYN', 'ESLVNBTMZU'],
['XNIU', 'YLOPLAC', 'CCG', 'OLS', 'ELIPMOC', 'MIV', 'TSET'],
2,
1,
[[],[]]),
[['COMPILE'], [{'word': 'COMPILE', 'row': 6, 'col': 8, 'dir': 2, 'reverse': 1}]])
def test_reverseWords(self):
self.assertEqual(reverseWords(['UNIX', 'CALPOLY', 'GCC', 'SLO', 'COMPILE', 'VIM', 'TEST']), [
'XINU', 'YLOPLAC', 'CCG', 'OLS', 'ELIPMOC', 'MIV', 'TSET'])
self.assertEqual(reverseWords(['CHICKEN', 'DOG', 'CAT', 'BEAR', 'RABBIT', 'ZEBRA', 'MOUSE', 'RACCOON']),
['NEKCIHC', 'GOD', 'TAC', 'RAEB', 'TIBBAR', 'ARBEZ', 'ESUOM', 'NOOCCAR'])
self.assertEqual(reverseWords(['SLACK', 'HIGH', 'HIGHLAND', 'CHORRO', 'PEACH', 'BROAD', 'GRAND', 'OSOS', 'MORRO', 'HIGUERA', 'MARSH', 'FOOTHILL', 'NIPOMO', 'MILL', 'PALM']),
['KCALS', 'HGIH', 'DNALHGIH', 'ORROHC', 'HCAEP', 'DAORB', 'DNARG', 'SOSO', 'ORROM', 'AREUGIH', 'HSRAM', 'LLIHTOOF', 'OMOPIN', 'LLIM', 'MLAP'])
# Run the unit tests.
if __name__ == '__main__':
unittest.main()
| mit |
ericaschwa/randomOldSideProjects | election_predictor/build/lib/xlrd/formatting.py | 42 | 45335 | # -*- coding: cp1252 -*-
##
# Module for formatting information.
#
# <p>Copyright © 2005-2012 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under
# a BSD-style licence.</p>
##
# No part of the content of this file was derived from the works of David Giffin.
from __future__ import print_function
DEBUG = 0
import re
from struct import unpack
from .timemachine import *
from .biffh import BaseObject, unpack_unicode, unpack_string, \
upkbits, upkbitsL, fprintf, \
FUN, FDT, FNU, FGE, FTX, XL_CELL_NUMBER, XL_CELL_DATE, \
XL_FORMAT, XL_FORMAT2, \
XLRDError
_cellty_from_fmtty = {
FNU: XL_CELL_NUMBER,
FUN: XL_CELL_NUMBER,
FGE: XL_CELL_NUMBER,
FDT: XL_CELL_DATE,
FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
}
excel_default_palette_b5 = (
( 0, 0, 0), (255, 255, 255), (255, 0, 0), ( 0, 255, 0),
( 0, 0, 255), (255, 255, 0), (255, 0, 255), ( 0, 255, 255),
(128, 0, 0), ( 0, 128, 0), ( 0, 0, 128), (128, 128, 0),
(128, 0, 128), ( 0, 128, 128), (192, 192, 192), (128, 128, 128),
(153, 153, 255), (153, 51, 102), (255, 255, 204), (204, 255, 255),
(102, 0, 102), (255, 128, 128), ( 0, 102, 204), (204, 204, 255),
( 0, 0, 128), (255, 0, 255), (255, 255, 0), ( 0, 255, 255),
(128, 0, 128), (128, 0, 0), ( 0, 128, 128), ( 0, 0, 255),
( 0, 204, 255), (204, 255, 255), (204, 255, 204), (255, 255, 153),
(153, 204, 255), (255, 153, 204), (204, 153, 255), (227, 227, 227),
( 51, 102, 255), ( 51, 204, 204), (153, 204, 0), (255, 204, 0),
(255, 153, 0), (255, 102, 0), (102, 102, 153), (150, 150, 150),
( 0, 51, 102), ( 51, 153, 102), ( 0, 51, 0), ( 51, 51, 0),
(153, 51, 0), (153, 51, 102), ( 51, 51, 153), ( 51, 51, 51),
)
excel_default_palette_b2 = excel_default_palette_b5[:16]
# Following table borrowed from Gnumeric 1.4 source.
# Checked against OOo docs and MS docs.
excel_default_palette_b8 = ( # (red, green, blue)
( 0, 0, 0), (255,255,255), (255, 0, 0), ( 0,255, 0), # 0
( 0, 0,255), (255,255, 0), (255, 0,255), ( 0,255,255), # 4
(128, 0, 0), ( 0,128, 0), ( 0, 0,128), (128,128, 0), # 8
(128, 0,128), ( 0,128,128), (192,192,192), (128,128,128), # 12
(153,153,255), (153, 51,102), (255,255,204), (204,255,255), # 16
(102, 0,102), (255,128,128), ( 0,102,204), (204,204,255), # 20
( 0, 0,128), (255, 0,255), (255,255, 0), ( 0,255,255), # 24
(128, 0,128), (128, 0, 0), ( 0,128,128), ( 0, 0,255), # 28
( 0,204,255), (204,255,255), (204,255,204), (255,255,153), # 32
(153,204,255), (255,153,204), (204,153,255), (255,204,153), # 36
( 51,102,255), ( 51,204,204), (153,204, 0), (255,204, 0), # 40
(255,153, 0), (255,102, 0), (102,102,153), (150,150,150), # 44
( 0, 51,102), ( 51,153,102), ( 0, 51, 0), ( 51, 51, 0), # 48
(153, 51, 0), (153, 51,102), ( 51, 51,153), ( 51, 51, 51), # 52
)
default_palette = {
80: excel_default_palette_b8,
70: excel_default_palette_b5,
50: excel_default_palette_b5,
45: excel_default_palette_b2,
40: excel_default_palette_b2,
30: excel_default_palette_b2,
21: excel_default_palette_b2,
20: excel_default_palette_b2,
}
"""
00H = Normal
01H = RowLevel_lv (see next field)
02H = ColLevel_lv (see next field)
03H = Comma
04H = Currency
05H = Percent
06H = Comma [0] (BIFF4-BIFF8)
07H = Currency [0] (BIFF4-BIFF8)
08H = Hyperlink (BIFF8)
09H = Followed Hyperlink (BIFF8)
"""
built_in_style_names = [
"Normal",
"RowLevel_",
"ColLevel_",
"Comma",
"Currency",
"Percent",
"Comma [0]",
"Currency [0]",
"Hyperlink",
"Followed Hyperlink",
]
def initialise_colour_map(book):
book.colour_map = {}
book.colour_indexes_used = {}
if not book.formatting_info:
return
# Add the 8 invariant colours
for i in xrange(8):
book.colour_map[i] = excel_default_palette_b8[i]
# Add the default palette depending on the version
dpal = default_palette[book.biff_version]
ndpal = len(dpal)
for i in xrange(ndpal):
book.colour_map[i+8] = dpal[i]
# Add the specials -- None means the RGB value is not known
# System window text colour for border lines
book.colour_map[ndpal+8] = None
# System window background colour for pattern background
book.colour_map[ndpal+8+1] = None #
for ci in (
0x51, # System ToolTip text colour (used in note objects)
0x7FFF, # 32767, system window text colour for fonts
):
book.colour_map[ci] = None
def nearest_colour_index(colour_map, rgb, debug=0):
# General purpose function. Uses Euclidean distance.
# So far used only for pre-BIFF8 WINDOW2 record.
# Doesn't have to be fast.
# Doesn't have to be fancy.
best_metric = 3 * 256 * 256
best_colourx = 0
for colourx, cand_rgb in colour_map.items():
if cand_rgb is None:
continue
metric = 0
for v1, v2 in zip(rgb, cand_rgb):
metric += (v1 - v2) * (v1 - v2)
if metric < best_metric:
best_metric = metric
best_colourx = colourx
if metric == 0:
break
if 0 and debug:
print("nearest_colour_index for %r is %r -> %r; best_metric is %d" \
% (rgb, best_colourx, colour_map[best_colourx], best_metric))
return best_colourx
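# Hedged usage sketch (illustrative comment only): given a small colour map,
# the helper returns the key whose RGB value is closest (Euclidean distance)
# to the requested colour. The map below is made up for illustration.
#
#   _demo_map = {0: (0, 0, 0), 1: (255, 255, 255), 2: (255, 0, 0)}
#   nearest_colour_index(_demo_map, (250, 10, 5))   # -> 2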
##
# This mixin class exists solely so that Format, Font, and XF.... objects
# can be compared by value of their attributes.
class EqNeAttrs(object):
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return self.__dict__ != other.__dict__
##
# An Excel "font" contains the details of not only what is normally
# considered a font, but also several other display attributes.
# Items correspond to those in the Excel UI's Format/Cells/Font tab.
# <br /> -- New in version 0.6.1
class Font(BaseObject, EqNeAttrs):
##
# 1 = Characters are bold. Redundant; see "weight" attribute.
bold = 0
##
# Values: 0 = ANSI Latin, 1 = System default, 2 = Symbol,
# 77 = Apple Roman,
# 128 = ANSI Japanese Shift-JIS,
# 129 = ANSI Korean (Hangul),
# 130 = ANSI Korean (Johab),
# 134 = ANSI Chinese Simplified GBK,
# 136 = ANSI Chinese Traditional BIG5,
# 161 = ANSI Greek,
# 162 = ANSI Turkish,
# 163 = ANSI Vietnamese,
# 177 = ANSI Hebrew,
# 178 = ANSI Arabic,
# 186 = ANSI Baltic,
# 204 = ANSI Cyrillic,
# 222 = ANSI Thai,
# 238 = ANSI Latin II (Central European),
# 255 = OEM Latin I
character_set = 0
##
# An explanation of "colour index" is given in the Formatting
# section at the start of this document.
colour_index = 0
##
# 1 = Superscript, 2 = Subscript.
escapement = 0
##
# 0 = None (unknown or don't care)<br />
# 1 = Roman (variable width, serifed)<br />
# 2 = Swiss (variable width, sans-serifed)<br />
# 3 = Modern (fixed width, serifed or sans-serifed)<br />
# 4 = Script (cursive)<br />
# 5 = Decorative (specialised, for example Old English, Fraktur)
family = 0
##
# The 0-based index used to refer to this Font() instance.
# Note that index 4 is never used; xlrd supplies a dummy place-holder.
font_index = 0
##
# Height of the font (in twips). A twip = 1/20 of a point.
height = 0
##
# 1 = Characters are italic.
italic = 0
##
# The name of the font. Example: u"Arial"
name = UNICODE_LITERAL("")
##
# 1 = Characters are struck out.
struck_out = 0
##
# 0 = None<br />
# 1 = Single; 0x21 (33) = Single accounting<br />
# 2 = Double; 0x22 (34) = Double accounting
underline_type = 0
##
# 1 = Characters are underlined. Redundant; see "underline_type" attribute.
underlined = 0
##
# Font weight (100-1000). Standard values are 400 for normal text
# and 700 for bold text.
weight = 400
##
# 1 = Font is outline style (Macintosh only)
outline = 0
##
# 1 = Font is shadow style (Macintosh only)
shadow = 0
# No methods ...
def handle_efont(book, data): # BIFF2 only
if not book.formatting_info:
return
book.font_list[-1].colour_index = unpack('<H', data)[0]
def handle_font(book, data):
if not book.formatting_info:
return
if not book.encoding:
book.derive_encoding()
blah = DEBUG or book.verbosity >= 2
bv = book.biff_version
k = len(book.font_list)
if k == 4:
f = Font()
f.name = UNICODE_LITERAL('Dummy Font')
f.font_index = k
book.font_list.append(f)
k += 1
f = Font()
f.font_index = k
book.font_list.append(f)
if bv >= 50:
(
f.height, option_flags, f.colour_index, f.weight,
f.escapement, f.underline_type, f.family,
f.character_set,
) = unpack('<HHHHHBBB', data[0:13])
f.bold = option_flags & 1
f.italic = (option_flags & 2) >> 1
f.underlined = (option_flags & 4) >> 2
f.struck_out = (option_flags & 8) >> 3
f.outline = (option_flags & 16) >> 4
f.shadow = (option_flags & 32) >> 5
if bv >= 80:
f.name = unpack_unicode(data, 14, lenlen=1)
else:
f.name = unpack_string(data, 14, book.encoding, lenlen=1)
elif bv >= 30:
f.height, option_flags, f.colour_index = unpack('<HHH', data[0:6])
f.bold = option_flags & 1
f.italic = (option_flags & 2) >> 1
f.underlined = (option_flags & 4) >> 2
f.struck_out = (option_flags & 8) >> 3
f.outline = (option_flags & 16) >> 4
f.shadow = (option_flags & 32) >> 5
f.name = unpack_string(data, 6, book.encoding, lenlen=1)
# Now cook up the remaining attributes ...
f.weight = [400, 700][f.bold]
f.escapement = 0 # None
f.underline_type = f.underlined # None or Single
f.family = 0 # Unknown / don't care
f.character_set = 1 # System default (0 means "ANSI Latin")
else: # BIFF2
f.height, option_flags = unpack('<HH', data[0:4])
f.colour_index = 0x7FFF # "system window text colour"
f.bold = option_flags & 1
f.italic = (option_flags & 2) >> 1
f.underlined = (option_flags & 4) >> 2
f.struck_out = (option_flags & 8) >> 3
f.outline = 0
f.shadow = 0
f.name = unpack_string(data, 4, book.encoding, lenlen=1)
# Now cook up the remaining attributes ...
f.weight = [400, 700][f.bold]
f.escapement = 0 # None
f.underline_type = f.underlined # None or Single
f.family = 0 # Unknown / don't care
f.character_set = 1 # System default (0 means "ANSI Latin")
if blah:
f.dump(
book.logfile,
header="--- handle_font: font[%d] ---" % f.font_index,
footer="-------------------",
)
# === "Number formats" ===
##
# "Number format" information from a FORMAT record.
# <br /> -- New in version 0.6.1
class Format(BaseObject, EqNeAttrs):
##
# The key into Book.format_map
format_key = 0
##
# A classification that has been inferred from the format string.
# Currently, this is used only to distinguish between numbers and dates.
# <br />Values:
# <br />FUN = 0 # unknown
# <br />FDT = 1 # date
# <br />FNU = 2 # number
# <br />FGE = 3 # general
# <br />FTX = 4 # text
type = FUN
##
# The format string
format_str = UNICODE_LITERAL('')
def __init__(self, format_key, ty, format_str):
self.format_key = format_key
self.type = ty
self.format_str = format_str
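# Hedged usage sketch (illustrative comment only): a Format instance simply
# bundles a format key, the inferred type code and the raw format string.
#
#   _fmt = Format(0x0e, FDT, UNICODE_LITERAL("m/d/yy"))
#   _fmt.type == FDT   # True: 0x0e is one of the standard date format keys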
std_format_strings = {
# "std" == "standard for US English locale"
# #### TODO ... a lot of work to tailor these to the user's locale.
# See e.g. gnumeric-1.x.y/src/formats.c
0x00: "General",
0x01: "0",
0x02: "0.00",
0x03: "#,##0",
0x04: "#,##0.00",
0x05: "$#,##0_);($#,##0)",
0x06: "$#,##0_);[Red]($#,##0)",
0x07: "$#,##0.00_);($#,##0.00)",
0x08: "$#,##0.00_);[Red]($#,##0.00)",
0x09: "0%",
0x0a: "0.00%",
0x0b: "0.00E+00",
0x0c: "# ?/?",
0x0d: "# ??/??",
0x0e: "m/d/yy",
0x0f: "d-mmm-yy",
0x10: "d-mmm",
0x11: "mmm-yy",
0x12: "h:mm AM/PM",
0x13: "h:mm:ss AM/PM",
0x14: "h:mm",
0x15: "h:mm:ss",
0x16: "m/d/yy h:mm",
0x25: "#,##0_);(#,##0)",
0x26: "#,##0_);[Red](#,##0)",
0x27: "#,##0.00_);(#,##0.00)",
0x28: "#,##0.00_);[Red](#,##0.00)",
0x29: "_(* #,##0_);_(* (#,##0);_(* \"-\"_);_(@_)",
0x2a: "_($* #,##0_);_($* (#,##0);_($* \"-\"_);_(@_)",
0x2b: "_(* #,##0.00_);_(* (#,##0.00);_(* \"-\"??_);_(@_)",
0x2c: "_($* #,##0.00_);_($* (#,##0.00);_($* \"-\"??_);_(@_)",
0x2d: "mm:ss",
0x2e: "[h]:mm:ss",
0x2f: "mm:ss.0",
0x30: "##0.0E+0",
0x31: "@",
}
fmt_code_ranges = [ # both-inclusive ranges of "standard" format codes
# Source: the openoffice.org doc't
# and the OOXML spec Part 4, section 3.8.30
( 0, 0, FGE),
( 1, 13, FNU),
(14, 22, FDT),
(27, 36, FDT), # CJK date formats
(37, 44, FNU),
(45, 47, FDT),
(48, 48, FNU),
(49, 49, FTX),
# Gnumeric assumes (or assumed) that built-in formats finish at 49, not at 163
(50, 58, FDT), # CJK date formats
(59, 62, FNU), # Thai number (currency?) formats
(67, 70, FNU), # Thai number (currency?) formats
(71, 81, FDT), # Thai date formats
]
std_format_code_types = {}
for lo, hi, ty in fmt_code_ranges:
for x in xrange(lo, hi+1):
std_format_code_types[x] = ty
del lo, hi, ty, x
date_chars = UNICODE_LITERAL('ymdhs') # year, month/minute, day, hour, second
date_char_dict = {}
for _c in date_chars + date_chars.upper():
date_char_dict[_c] = 5
del _c, date_chars
skip_char_dict = {}
for _c in UNICODE_LITERAL('$-+/(): '):
skip_char_dict[_c] = 1
num_char_dict = {
UNICODE_LITERAL('0'): 5,
UNICODE_LITERAL('#'): 5,
UNICODE_LITERAL('?'): 5,
}
non_date_formats = {
UNICODE_LITERAL('0.00E+00'):1,
UNICODE_LITERAL('##0.0E+0'):1,
UNICODE_LITERAL('General') :1,
UNICODE_LITERAL('GENERAL') :1, # OOo Calc 1.1.4 does this.
UNICODE_LITERAL('general') :1, # pyExcelerator 0.6.3 does this.
UNICODE_LITERAL('@') :1,
}
fmt_bracketed_sub = re.compile(r'\[[^]]*\]').sub
# Boolean format strings (actual cases)
# u'"Yes";"Yes";"No"'
# u'"True";"True";"False"'
# u'"On";"On";"Off"'
def is_date_format_string(book, fmt):
# Heuristics:
# Ignore "text" and [stuff in square brackets (aarrgghh -- see below)].
# Handle backslashed-escaped chars properly.
# E.g. hh\hmm\mss\s should produce a display like 23h59m59s
# Date formats have one or more of ymdhs (caseless) in them.
# Numeric formats have # and 0.
# N.B. u'General"."' hence get rid of "text" first.
# TODO: Find where formats are interpreted in Gnumeric
# TODO: u'[h]\\ \\h\\o\\u\\r\\s' ([h] means don't care about hours > 23)
state = 0
s = ''
for c in fmt:
if state == 0:
if c == UNICODE_LITERAL('"'):
state = 1
elif c in UNICODE_LITERAL(r"\_*"):
state = 2
elif c in skip_char_dict:
pass
else:
s += c
elif state == 1:
if c == UNICODE_LITERAL('"'):
state = 0
elif state == 2:
# Ignore char after backslash, underscore or asterisk
state = 0
assert 0 <= state <= 2
if book.verbosity >= 4:
print("is_date_format_string: reduced format is %s" % REPR(s), file=book.logfile)
s = fmt_bracketed_sub('', s)
if s in non_date_formats:
return False
state = 0
separator = ";"
got_sep = 0
date_count = num_count = 0
for c in s:
if c in date_char_dict:
date_count += date_char_dict[c]
elif c in num_char_dict:
num_count += num_char_dict[c]
elif c == separator:
got_sep = 1
# print num_count, date_count, repr(fmt)
if date_count and not num_count:
return True
if num_count and not date_count:
return False
if date_count:
if book.verbosity:
fprintf(book.logfile,
'WARNING *** is_date_format: ambiguous d=%d n=%d fmt=%r\n',
date_count, num_count, fmt)
elif not got_sep:
if book.verbosity:
fprintf(book.logfile,
"WARNING *** format %r produces constant result\n",
fmt)
return date_count > num_count
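# Hedged usage sketch (illustrative comment only): is_date_format_string() only
# reads book.verbosity (and book.logfile when verbosity is raised), so a minimal
# stand-in object is enough to exercise it.
#
#   class _FakeBook(object):
#       verbosity = 0
#       logfile = None   # never consulted while verbosity is 0
#   is_date_format_string(_FakeBook(), UNICODE_LITERAL("m/d/yy h:mm"))   # True
#   is_date_format_string(_FakeBook(), UNICODE_LITERAL("#,##0.00"))      # False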
def handle_format(self, data, rectype=XL_FORMAT):
DEBUG = 0
bv = self.biff_version
if rectype == XL_FORMAT2:
bv = min(bv, 30)
if not self.encoding:
self.derive_encoding()
strpos = 2
if bv >= 50:
fmtkey = unpack('<H', data[0:2])[0]
else:
fmtkey = self.actualfmtcount
if bv <= 30:
strpos = 0
self.actualfmtcount += 1
if bv >= 80:
unistrg = unpack_unicode(data, 2)
else:
unistrg = unpack_string(data, strpos, self.encoding, lenlen=1)
blah = DEBUG or self.verbosity >= 3
if blah:
fprintf(self.logfile,
"FORMAT: count=%d fmtkey=0x%04x (%d) s=%r\n",
self.actualfmtcount, fmtkey, fmtkey, unistrg)
is_date_s = self.is_date_format_string(unistrg)
ty = [FGE, FDT][is_date_s]
if not(fmtkey > 163 or bv < 50):
# user_defined if fmtkey > 163
# N.B. Gnumeric incorrectly starts these at 50 instead of 164 :-(
# if earlier than BIFF 5, standard info is useless
std_ty = std_format_code_types.get(fmtkey, FUN)
# print "std ty", std_ty
is_date_c = std_ty == FDT
if self.verbosity and 0 < fmtkey < 50 and (is_date_c ^ is_date_s):
DEBUG = 2
fprintf(self.logfile,
"WARNING *** Conflict between "
"std format key %d and its format string %r\n",
fmtkey, unistrg)
if DEBUG == 2:
fprintf(self.logfile,
"ty: %d; is_date_c: %r; is_date_s: %r; fmt_strg: %r",
ty, is_date_c, is_date_s, unistrg)
fmtobj = Format(fmtkey, ty, unistrg)
if blah:
fmtobj.dump(self.logfile,
header="--- handle_format [%d] ---" % (self.actualfmtcount-1, ))
self.format_map[fmtkey] = fmtobj
self.format_list.append(fmtobj)
# =============================================================================
def handle_palette(book, data):
if not book.formatting_info:
return
blah = DEBUG or book.verbosity >= 2
n_colours, = unpack('<H', data[:2])
expected_n_colours = (16, 56)[book.biff_version >= 50]
if ((DEBUG or book.verbosity >= 1)
and n_colours != expected_n_colours):
fprintf(book.logfile,
"NOTE *** Expected %d colours in PALETTE record, found %d\n",
expected_n_colours, n_colours)
elif blah:
fprintf(book.logfile,
"PALETTE record with %d colours\n", n_colours)
fmt = '<xx%di' % n_colours # use i to avoid long integers
expected_size = 4 * n_colours + 2
actual_size = len(data)
tolerance = 4
if not expected_size <= actual_size <= expected_size + tolerance:
raise XLRDError('PALETTE record: expected size %d, actual size %d' % (expected_size, actual_size))
colours = unpack(fmt, data[:expected_size])
assert book.palette_record == [] # There should be only 1 PALETTE record
# a colour will be 0xbbggrr
# IOW, red is at the little end
for i in xrange(n_colours):
c = colours[i]
red = c & 0xff
green = (c >> 8) & 0xff
blue = (c >> 16) & 0xff
old_rgb = book.colour_map[8+i]
new_rgb = (red, green, blue)
book.palette_record.append(new_rgb)
book.colour_map[8+i] = new_rgb
if blah:
if new_rgb != old_rgb:
print("%2d: %r -> %r" % (i, old_rgb, new_rgb), file=book.logfile)
def palette_epilogue(book):
# Check colour indexes in fonts etc.
# This must be done here as FONT records
# come *before* the PALETTE record :-(
for font in book.font_list:
if font.font_index == 4: # the missing font record
continue
cx = font.colour_index
if cx == 0x7fff: # system window text colour
continue
if cx in book.colour_map:
book.colour_indexes_used[cx] = 1
elif book.verbosity:
print("Size of colour table:", len(book.colour_map), file=book.logfile)
fprintf(book.logfile, "*** Font #%d (%r): colour index 0x%04x is unknown\n",
font.font_index, font.name, cx)
if book.verbosity >= 1:
used = sorted(book.colour_indexes_used.keys())
print("\nColour indexes used:\n%r\n" % used, file=book.logfile)
def handle_style(book, data):
if not book.formatting_info:
return
blah = DEBUG or book.verbosity >= 2
bv = book.biff_version
flag_and_xfx, built_in_id, level = unpack('<HBB', data[:4])
xf_index = flag_and_xfx & 0x0fff
if (data == b"\0\0\0\0"
and "Normal" not in book.style_name_map):
# Erroneous record (doesn't have built-in bit set).
# Example file supplied by Jeff Bell.
built_in = 1
built_in_id = 0
xf_index = 0
name = "Normal"
level = 255
elif flag_and_xfx & 0x8000:
# built-in style
built_in = 1
name = built_in_style_names[built_in_id]
if 1 <= built_in_id <= 2:
name += str(level + 1)
else:
# user-defined style
built_in = 0
built_in_id = 0
level = 0
if bv >= 80:
try:
name = unpack_unicode(data, 2, lenlen=2)
except UnicodeDecodeError:
print("STYLE: built_in=%d xf_index=%d built_in_id=%d level=%d" \
% (built_in, xf_index, built_in_id, level), file=book.logfile)
print("raw bytes:", repr(data[2:]), file=book.logfile)
raise
else:
name = unpack_string(data, 2, book.encoding, lenlen=1)
if blah and not name:
print("WARNING *** A user-defined style has a zero-length name", file=book.logfile)
book.style_name_map[name] = (built_in, xf_index)
if blah:
fprintf(book.logfile, "STYLE: built_in=%d xf_index=%d built_in_id=%d level=%d name=%r\n",
built_in, xf_index, built_in_id, level, name)
def check_colour_indexes_in_obj(book, obj, orig_index):
alist = sorted(obj.__dict__.items())
for attr, nobj in alist:
if hasattr(nobj, 'dump'):
check_colour_indexes_in_obj(book, nobj, orig_index)
elif attr.find('colour_index') >= 0:
if nobj in book.colour_map:
book.colour_indexes_used[nobj] = 1
continue
oname = obj.__class__.__name__
print("*** xf #%d : %s.%s = 0x%04x (unknown)" \
% (orig_index, oname, attr, nobj), file=book.logfile)
def fill_in_standard_formats(book):
for x in std_format_code_types.keys():
if x not in book.format_map:
ty = std_format_code_types[x]
# Note: many standard format codes (mostly CJK date formats) have
# format strings that vary by locale; xlrd does not (yet)
# handle those; the type (date or numeric) is recorded but the fmt_str will be None.
fmt_str = std_format_strings.get(x)
fmtobj = Format(x, ty, fmt_str)
book.format_map[x] = fmtobj
def handle_xf(self, data):
### self is a Book instance
# DEBUG = 0
blah = DEBUG or self.verbosity >= 3
bv = self.biff_version
xf = XF()
xf.alignment = XFAlignment()
xf.alignment.indent_level = 0
xf.alignment.shrink_to_fit = 0
xf.alignment.text_direction = 0
xf.border = XFBorder()
xf.border.diag_up = 0
xf.border.diag_down = 0
xf.border.diag_colour_index = 0
xf.border.diag_line_style = 0 # no line
xf.background = XFBackground()
xf.protection = XFProtection()
# fill in the known standard formats
if bv >= 50 and not self.xfcount:
# i.e. do this once before we process the first XF record
fill_in_standard_formats(self)
if bv >= 80:
unpack_fmt = '<HHHBBBBIiH'
(xf.font_index, xf.format_key, pkd_type_par,
pkd_align1, xf.alignment.rotation, pkd_align2,
pkd_used, pkd_brdbkg1, pkd_brdbkg2, pkd_brdbkg3,
) = unpack(unpack_fmt, data[0:20])
upkbits(xf.protection, pkd_type_par, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_par, (
(2, 0x0004, 'is_style'),
# Following is not in OOo docs, but is mentioned
# in Gnumeric source and also in (deep breath)
# org.apache.poi.hssf.record.ExtendedFormatRecord.java
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
(4, 0xFFF0, 'parent_style_index'),
))
upkbits(xf.alignment, pkd_align1, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
(4, 0x70, 'vert_align'),
))
upkbits(xf.alignment, pkd_align2, (
(0, 0x0f, 'indent_level'),
(4, 0x10, 'shrink_to_fit'),
(6, 0xC0, 'text_direction'),
))
reg = pkd_used >> 2
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbitsL(xf.border, pkd_brdbkg1, (
(0, 0x0000000f, 'left_line_style'),
(4, 0x000000f0, 'right_line_style'),
(8, 0x00000f00, 'top_line_style'),
(12, 0x0000f000, 'bottom_line_style'),
(16, 0x007f0000, 'left_colour_index'),
(23, 0x3f800000, 'right_colour_index'),
(30, 0x40000000, 'diag_down'),
(31, 0x80000000, 'diag_up'),
))
upkbits(xf.border, pkd_brdbkg2, (
(0, 0x0000007F, 'top_colour_index'),
(7, 0x00003F80, 'bottom_colour_index'),
(14, 0x001FC000, 'diag_colour_index'),
(21, 0x01E00000, 'diag_line_style'),
))
upkbitsL(xf.background, pkd_brdbkg2, (
(26, 0xFC000000, 'fill_pattern'),
))
upkbits(xf.background, pkd_brdbkg3, (
(0, 0x007F, 'pattern_colour_index'),
(7, 0x3F80, 'background_colour_index'),
))
elif bv >= 50:
unpack_fmt = '<HHHBBIi'
(xf.font_index, xf.format_key, pkd_type_par,
pkd_align1, pkd_orient_used,
pkd_brdbkg1, pkd_brdbkg2,
) = unpack(unpack_fmt, data[0:16])
upkbits(xf.protection, pkd_type_par, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_par, (
(2, 0x0004, 'is_style'),
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
(4, 0xFFF0, 'parent_style_index'),
))
upkbits(xf.alignment, pkd_align1, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
(4, 0x70, 'vert_align'),
))
orientation = pkd_orient_used & 0x03
xf.alignment.rotation = [0, 255, 90, 180][orientation]
reg = pkd_orient_used >> 2
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbitsL(xf.background, pkd_brdbkg1, (
( 0, 0x0000007F, 'pattern_colour_index'),
( 7, 0x00003F80, 'background_colour_index'),
(16, 0x003F0000, 'fill_pattern'),
))
upkbitsL(xf.border, pkd_brdbkg1, (
(22, 0x01C00000, 'bottom_line_style'),
(25, 0xFE000000, 'bottom_colour_index'),
))
upkbits(xf.border, pkd_brdbkg2, (
( 0, 0x00000007, 'top_line_style'),
( 3, 0x00000038, 'left_line_style'),
( 6, 0x000001C0, 'right_line_style'),
( 9, 0x0000FE00, 'top_colour_index'),
(16, 0x007F0000, 'left_colour_index'),
(23, 0x3F800000, 'right_colour_index'),
))
elif bv >= 40:
unpack_fmt = '<BBHBBHI'
(xf.font_index, xf.format_key, pkd_type_par,
pkd_align_orient, pkd_used,
pkd_bkg_34, pkd_brd_34,
) = unpack(unpack_fmt, data[0:12])
upkbits(xf.protection, pkd_type_par, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_par, (
(2, 0x0004, 'is_style'),
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
(4, 0xFFF0, 'parent_style_index'),
))
upkbits(xf.alignment, pkd_align_orient, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
(4, 0x30, 'vert_align'),
))
orientation = (pkd_align_orient & 0xC0) >> 6
xf.alignment.rotation = [0, 255, 90, 180][orientation]
reg = pkd_used >> 2
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbits(xf.background, pkd_bkg_34, (
( 0, 0x003F, 'fill_pattern'),
( 6, 0x07C0, 'pattern_colour_index'),
(11, 0xF800, 'background_colour_index'),
))
upkbitsL(xf.border, pkd_brd_34, (
( 0, 0x00000007, 'top_line_style'),
( 3, 0x000000F8, 'top_colour_index'),
( 8, 0x00000700, 'left_line_style'),
(11, 0x0000F800, 'left_colour_index'),
(16, 0x00070000, 'bottom_line_style'),
(19, 0x00F80000, 'bottom_colour_index'),
(24, 0x07000000, 'right_line_style'),
(27, 0xF8000000, 'right_colour_index'),
))
elif bv == 30:
unpack_fmt = '<BBBBHHI'
(xf.font_index, xf.format_key, pkd_type_prot,
pkd_used, pkd_align_par,
pkd_bkg_34, pkd_brd_34,
) = unpack(unpack_fmt, data[0:12])
upkbits(xf.protection, pkd_type_prot, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_prot, (
(2, 0x0004, 'is_style'),
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
))
upkbits(xf.alignment, pkd_align_par, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
))
upkbits(xf, pkd_align_par, (
(4, 0xFFF0, 'parent_style_index'),
))
reg = pkd_used >> 2
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbits(xf.background, pkd_bkg_34, (
( 0, 0x003F, 'fill_pattern'),
( 6, 0x07C0, 'pattern_colour_index'),
(11, 0xF800, 'background_colour_index'),
))
upkbitsL(xf.border, pkd_brd_34, (
( 0, 0x00000007, 'top_line_style'),
( 3, 0x000000F8, 'top_colour_index'),
( 8, 0x00000700, 'left_line_style'),
(11, 0x0000F800, 'left_colour_index'),
(16, 0x00070000, 'bottom_line_style'),
(19, 0x00F80000, 'bottom_colour_index'),
(24, 0x07000000, 'right_line_style'),
(27, 0xF8000000, 'right_colour_index'),
))
xf.alignment.vert_align = 2 # bottom
xf.alignment.rotation = 0
elif bv == 21:
#### Warning: incomplete treatment; formatting_info not fully supported.
#### Probably need to offset incoming BIFF2 XF[n] to BIFF8-like XF[n+16],
#### and create XF[0:16] like the standard ones in BIFF8
#### *AND* add 16 to all XF references in cell records :-(
(xf.font_index, format_etc, halign_etc) = unpack('<BxBB', data)
xf.format_key = format_etc & 0x3F
upkbits(xf.protection, format_etc, (
(6, 0x40, 'cell_locked'),
(7, 0x80, 'formula_hidden'),
))
upkbits(xf.alignment, halign_etc, (
(0, 0x07, 'hor_align'),
))
for mask, side in ((0x08, 'left'), (0x10, 'right'), (0x20, 'top'), (0x40, 'bottom')):
if halign_etc & mask:
colour_index, line_style = 8, 1 # black, thin
else:
colour_index, line_style = 0, 0 # none, none
setattr(xf.border, side + '_colour_index', colour_index)
setattr(xf.border, side + '_line_style', line_style)
bg = xf.background
if halign_etc & 0x80:
bg.fill_pattern = 17
else:
bg.fill_pattern = 0
bg.background_colour_index = 9 # white
bg.pattern_colour_index = 8 # black
xf.parent_style_index = 0 # ???????????
xf.alignment.vert_align = 2 # bottom
xf.alignment.rotation = 0
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, 1)
else:
raise XLRDError('programmer stuff-up: bv=%d' % bv)
xf.xf_index = len(self.xf_list)
self.xf_list.append(xf)
self.xfcount += 1
if blah:
xf.dump(
self.logfile,
header="--- handle_xf: xf[%d] ---" % xf.xf_index,
footer=" ",
)
try:
fmt = self.format_map[xf.format_key]
cellty = _cellty_from_fmtty[fmt.type]
except KeyError:
cellty = XL_CELL_NUMBER
self._xf_index_to_xl_type_map[xf.xf_index] = cellty
# Now for some assertions ...
if self.formatting_info:
if self.verbosity and xf.is_style and xf.parent_style_index != 0x0FFF:
msg = "WARNING *** XF[%d] is a style XF but parent_style_index is 0x%04x, not 0x0fff\n"
fprintf(self.logfile, msg, xf.xf_index, xf.parent_style_index)
check_colour_indexes_in_obj(self, xf, xf.xf_index)
if xf.format_key not in self.format_map:
msg = "WARNING *** XF[%d] unknown (raw) format key (%d, 0x%04x)\n"
if self.verbosity:
fprintf(self.logfile, msg,
xf.xf_index, xf.format_key, xf.format_key)
xf.format_key = 0
def xf_epilogue(self):
# self is a Book instance.
self._xf_epilogue_done = 1
num_xfs = len(self.xf_list)
blah = DEBUG or self.verbosity >= 3
blah1 = DEBUG or self.verbosity >= 1
if blah:
fprintf(self.logfile, "xf_epilogue called ...\n")
def check_same(book_arg, xf_arg, parent_arg, attr):
# the _arg caper is to avoid a Warning msg from Python 2.1 :-(
if getattr(xf_arg, attr) != getattr(parent_arg, attr):
fprintf(book_arg.logfile,
"NOTE !!! XF[%d] parent[%d] %s different\n",
xf_arg.xf_index, parent_arg.xf_index, attr)
for xfx in xrange(num_xfs):
xf = self.xf_list[xfx]
if xf.format_key not in self.format_map:
msg = "ERROR *** XF[%d] unknown format key (%d, 0x%04x)\n"
fprintf(self.logfile, msg,
xf.xf_index, xf.format_key, xf.format_key)
xf.format_key = 0
fmt = self.format_map[xf.format_key]
cellty = _cellty_from_fmtty[fmt.type]
self._xf_index_to_xl_type_map[xf.xf_index] = cellty
# Now for some assertions etc
if not self.formatting_info:
continue
if xf.is_style:
continue
if not(0 <= xf.parent_style_index < num_xfs):
if blah1:
fprintf(self.logfile,
"WARNING *** XF[%d]: is_style=%d but parent_style_index=%d\n",
xf.xf_index, xf.is_style, xf.parent_style_index)
# make it conform
xf.parent_style_index = 0
if self.biff_version >= 30:
if blah1:
if xf.parent_style_index == xf.xf_index:
fprintf(self.logfile,
"NOTE !!! XF[%d]: parent_style_index is also %d\n",
xf.xf_index, xf.parent_style_index)
elif not self.xf_list[xf.parent_style_index].is_style:
fprintf(self.logfile,
"NOTE !!! XF[%d]: parent_style_index is %d; style flag not set\n",
xf.xf_index, xf.parent_style_index)
if blah1 and xf.parent_style_index > xf.xf_index:
fprintf(self.logfile,
"NOTE !!! XF[%d]: parent_style_index is %d; out of order?\n",
xf.xf_index, xf.parent_style_index)
parent = self.xf_list[xf.parent_style_index]
if not xf._alignment_flag and not parent._alignment_flag:
if blah1: check_same(self, xf, parent, 'alignment')
if not xf._background_flag and not parent._background_flag:
if blah1: check_same(self, xf, parent, 'background')
if not xf._border_flag and not parent._border_flag:
if blah1: check_same(self, xf, parent, 'border')
if not xf._protection_flag and not parent._protection_flag:
if blah1: check_same(self, xf, parent, 'protection')
if not xf._format_flag and not parent._format_flag:
if blah1 and xf.format_key != parent.format_key:
fprintf(self.logfile,
"NOTE !!! XF[%d] fmtk=%d, parent[%d] fmtk=%r\n%r / %r\n",
xf.xf_index, xf.format_key, parent.xf_index, parent.format_key,
self.format_map[xf.format_key].format_str,
self.format_map[parent.format_key].format_str)
if not xf._font_flag and not parent._font_flag:
if blah1 and xf.font_index != parent.font_index:
fprintf(self.logfile,
"NOTE !!! XF[%d] fontx=%d, parent[%d] fontx=%r\n",
xf.xf_index, xf.font_index, parent.xf_index, parent.font_index)
def initialise_book(book):
initialise_colour_map(book)
book._xf_epilogue_done = 0
methods = (
handle_font,
handle_efont,
handle_format,
is_date_format_string,
handle_palette,
palette_epilogue,
handle_style,
handle_xf,
xf_epilogue,
)
for method in methods:
setattr(book.__class__, method.__name__, method)
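# Hedged usage sketch (illustrative comment only): initialise_book() patches the
# handlers above onto the Book class, so after it runs they are available as
# bound methods of every Book instance, e.g.
#
#   initialise_book(book)
#   book.handle_palette(data)   # dispatched while parsing a PALETTE record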
##
# <p>A collection of the border-related attributes of an XF record.
# Items correspond to those in the Excel UI's Format/Cells/Border tab.</p>
# <p> An explanation of "colour index" is given in the Formatting
# section at the start of this document.
# There are five line style attributes; possible values and the
# associated meanings are:
# 0 = No line,
# 1 = Thin,
# 2 = Medium,
# 3 = Dashed,
# 4 = Dotted,
# 5 = Thick,
# 6 = Double,
# 7 = Hair,
# 8 = Medium dashed,
# 9 = Thin dash-dotted,
# 10 = Medium dash-dotted,
# 11 = Thin dash-dot-dotted,
# 12 = Medium dash-dot-dotted,
# 13 = Slanted medium dash-dotted.
# The line styles 8 to 13 appear in BIFF8 files (Excel 97 and later) only.
# For pictures of the line styles, refer to OOo docs s3.10 (p22)
# "Line Styles for Cell Borders (BIFF3-BIFF8)".</p>
# <br /> -- New in version 0.6.1
class XFBorder(BaseObject, EqNeAttrs):
##
# The colour index for the cell's top line
top_colour_index = 0
##
# The colour index for the cell's bottom line
bottom_colour_index = 0
##
# The colour index for the cell's left line
left_colour_index = 0
##
# The colour index for the cell's right line
right_colour_index = 0
##
# The colour index for the cell's diagonal lines, if any
diag_colour_index = 0
##
# The line style for the cell's top line
top_line_style = 0
##
# The line style for the cell's bottom line
bottom_line_style = 0
##
# The line style for the cell's left line
left_line_style = 0
##
# The line style for the cell's right line
right_line_style = 0
##
# The line style for the cell's diagonal lines, if any
diag_line_style = 0
##
# 1 = draw a diagonal from top left to bottom right
diag_down = 0
##
# 1 = draw a diagonal from bottom left to top right
diag_up = 0
##
# A collection of the background-related attributes of an XF record.
# Items correspond to those in the Excel UI's Format/Cells/Patterns tab.
# An explanation of "colour index" is given in the Formatting
# section at the start of this document.
# <br /> -- New in version 0.6.1
class XFBackground(BaseObject, EqNeAttrs):
##
# See section 3.11 of the OOo docs.
fill_pattern = 0
##
# See section 3.11 of the OOo docs.
background_colour_index = 0
##
# See section 3.11 of the OOo docs.
pattern_colour_index = 0
##
# A collection of the alignment and similar attributes of an XF record.
# Items correspond to those in the Excel UI's Format/Cells/Alignment tab.
# <br /> -- New in version 0.6.1
class XFAlignment(BaseObject, EqNeAttrs):
##
# Values: section 6.115 (p 214) of OOo docs
hor_align = 0
##
# Values: section 6.115 (p 215) of OOo docs
vert_align = 0
##
# Values: section 6.115 (p 215) of OOo docs.<br />
# Note: file versions BIFF7 and earlier use the documented
# "orientation" attribute; this will be mapped (without loss)
# into "rotation".
rotation = 0
##
# 1 = text is wrapped at right margin
text_wrapped = 0
##
# A number in range(15).
indent_level = 0
##
# 1 = shrink font size to fit text into cell.
shrink_to_fit = 0
##
# 0 = according to context; 1 = left-to-right; 2 = right-to-left
text_direction = 0
##
# A collection of the protection-related attributes of an XF record.
# Items correspond to those in the Excel UI's Format/Cells/Protection tab.
# Note the OOo docs include the "cell or style" bit
# in this bundle of attributes.
# This is incorrect; the bit is used in determining which bundles to use.
# <br /> -- New in version 0.6.1
class XFProtection(BaseObject, EqNeAttrs):
##
# 1 = Cell is prevented from being changed, moved, resized, or deleted
# (only if the sheet is protected).
cell_locked = 0
##
# 1 = Hide formula so that it doesn't appear in the formula bar when
# the cell is selected (only if the sheet is protected).
formula_hidden = 0
##
# eXtended Formatting information for cells, rows, columns and styles.
# <br /> -- New in version 0.6.1
#
# <p>Each of the 6 flags below describes the validity of
# a specific group of attributes.
# <br />
# In cell XFs, flag==0 means the attributes of the parent style XF are used,
# (but only if the attributes are valid there); flag==1 means the attributes
# of this XF are used.<br />
# In style XFs, flag==0 means the attribute setting is valid; flag==1 means
# the attribute should be ignored.<br />
# Note that the API
# provides both "raw" XFs and "computed" XFs -- in the latter case, cell XFs
# have had the above inheritance mechanism applied.
# </p>
class XF(BaseObject):
##
# 0 = cell XF, 1 = style XF
is_style = 0
##
# cell XF: Index into Book.xf_list
# of this XF's style XF<br />
# style XF: 0xFFF
parent_style_index = 0
##
#
_format_flag = 0
##
#
_font_flag = 0
##
#
_alignment_flag = 0
##
#
_border_flag = 0
##
#
_background_flag = 0
##
#
_protection_flag = 0
##
# Index into Book.xf_list
xf_index = 0
##
# Index into Book.font_list
font_index = 0
##
# Key into Book.format_map
# <p>
# Warning: OOo docs on the XF record call this "Index to FORMAT record".
# It is not an index in the Python sense. It is a key to a map.
# It is true <i>only</i> for Excel 4.0 and earlier files
# that the key into format_map from an XF instance
# is the same as the index into format_list, and <i>only</i>
# if the index is less than 164.
# </p>
format_key = 0
##
# An instance of an XFProtection object.
protection = None
##
# An instance of an XFBackground object.
background = None
##
# An instance of an XFAlignment object.
alignment = None
##
# An instance of an XFBorder object.
border = None
| artistic-2.0 |
hyxer/mqttsn_secure | broker/MQTTSClient/Python/MQTTSNinternal.py | 2 | 5717 | """
/*******************************************************************************
* Copyright (c) 2011, 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and Eclipse Distribution License v1.0 which accompany this distribution.
*
* The Eclipse Public License is available at
* http://www.eclipse.org/legal/epl-v10.html
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* Contributors:
* Ian Craggs - initial API and implementation and/or initial documentation
*******************************************************************************/
"""
import MQTTSN, time, sys, socket, traceback
debug = False
class Receivers:
def __init__(self, socket):
print "initializing receiver"
self.socket = socket
self.connected = False
self.observe = None
self.observed = []
self.inMsgs = {}
self.outMsgs = {}
self.puback = MQTTSN.Pubacks()
self.pubrec = MQTTSN.Pubrecs()
self.pubrel = MQTTSN.Pubrels()
self.pubcomp = MQTTSN.Pubcomps()
def lookfor(self, msgType):
self.observe = msgType
def waitfor(self, msgType, msgId=None):
msg = None
count = 0
while True:
while len(self.observed) > 0:
msg = self.observed.pop(0)
if msg.mh.MsgType == msgType and (msgId == None or msg.MsgId == msgId):
break
else:
msg = None
if msg != None:
break
time.sleep(0.2)
count += 1
if count == 25:
msg = None
break
self.observe = None
return msg
def receive(self, callback=None):
packet = None
try:
packet, address = MQTTSN.unpackPacket(MQTTSN.getPacket(self.socket))
except:
if sys.exc_info()[0] != socket.timeout:
print "unexpected exception", sys.exc_info()
raise sys.exc_info()
if packet == None:
time.sleep(0.1)
return
elif debug:
print packet
if self.observe == packet.mh.MsgType:
print "observed", packet
self.observed.append(packet)
elif packet.mh.MsgType == MQTTSN.ADVERTISE:
if hasattr(callback, "advertise"):
callback.advertise(address, packet.GwId, packet.Duration)
elif packet.mh.MsgType == MQTTSN.REGISTER:
if callback and hasattr(callback, "register"):
callback.register(packet.TopicId, packet.Topicname)
elif packet.mh.MsgType == MQTTSN.PUBACK:
"check if we are expecting a puback"
if self.outMsgs.has_key(packet.MsgId) and \
self.outMsgs[packet.MsgId].Flags.QoS == 1:
del self.outMsgs[packet.MsgId]
if hasattr(callback, "published"):
callback.published(packet.MsgId)
else:
raise Exception("No QoS 1 message with message id "+str(packet.MsgId)+" sent")
elif packet.mh.MsgType == MQTTSN.PUBREC:
if self.outMsgs.has_key(packet.MsgId):
self.pubrel.MsgId = packet.MsgId
self.socket.send(self.pubrel.pack())
else:
raise Exception("PUBREC received for unknown msg id "+ \
str(packet.MsgId))
elif packet.mh.MsgType == MQTTSN.PUBREL:
"release QOS 2 publication to client, & send PUBCOMP"
msgid = packet.MsgId
if not self.inMsgs.has_key(msgid):
pass # what should we do here?
else:
pub = self.inMsgs[packet.MsgId]
if callback == None or \
callback.messageArrived(pub.TopicName, pub.Data, 2, pub.Flags.Retain, pub.MsgId):
del self.inMsgs[packet.MsgId]
self.pubcomp.MsgId = packet.MsgId
self.socket.send(self.pubcomp.pack())
if callback == None:
return (pub.TopicName, pub.Data, 2, pub.Flags.Retain, pub.MsgId)
elif packet.mh.MsgType == MQTTSN.PUBCOMP:
"finished with this message id"
if self.outMsgs.has_key(packet.MsgId):
del self.outMsgs[packet.MsgId]
if hasattr(callback, "published"):
callback.published(packet.MsgId)
else:
raise Exception("PUBCOMP received for unknown msg id "+ \
str(packet.MsgId))
elif packet.mh.MsgType == MQTTSN.PUBLISH:
"finished with this message id"
if packet.Flags.QoS in [0, 3]:
qos = packet.Flags.QoS
topicname = packet.TopicName
data = packet.Data
if qos == 3:
qos = -1
if packet.Flags.TopicIdType == MQTTSN.TOPICID:
topicname = packet.Data[:packet.TopicId]
data = packet.Data[packet.TopicId:]
if callback == None:
return (topicname, data, qos, packet.Flags.Retain, packet.MsgId)
else:
callback.messageArrived(topicname, data, qos, packet.Flags.Retain, packet.MsgId)
elif packet.Flags.QoS == 1:
if callback == None:
          return (packet.TopicName, packet.Data, 1,
                  packet.Flags.Retain, packet.MsgId)
else:
if callback.messageArrived(packet.TopicName, packet.Data, 1,
packet.Flags.Retain, packet.MsgId):
self.puback.MsgId = packet.MsgId
self.socket.send(self.puback.pack())
elif packet.Flags.QoS == 2:
self.inMsgs[packet.MsgId] = packet
self.pubrec.MsgId = packet.MsgId
self.socket.send(self.pubrec.pack())
else:
raise Exception("Unexpected packet"+str(packet))
return packet
def __call__(self, callback):
try:
while True:
self.receive(callback)
except:
if sys.exc_info()[0] != socket.error:
print "unexpected exception", sys.exc_info()
traceback.print_exc()
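# Hedged usage sketch (illustrative comment only, not part of the original
# client): a caller typically wires a Receivers instance to a UDP socket and
# brackets a request/response exchange with lookfor()/waitfor(). This assumes
# the companion MQTTSN module defines a CONNACK message type constant.
#
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   receiver = Receivers(sock)
#   receiver.lookfor(MQTTSN.CONNACK)
#   # ... send a CONNECT packet on sock ...
#   ack = receiver.waitfor(MQTTSN.CONNACK)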
| mit |
dragonfly-science/django-pigeonpost | pigeonpost/south_migrations/0001_initial.py | 1 | 7960 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Pigeon'
db.create_table('pigeonpost_pigeon', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('source_content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('source_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('successes', self.gf('django.db.models.fields.IntegerField')(default=0)),
('failures', self.gf('django.db.models.fields.IntegerField')(default=0)),
('to_send', self.gf('django.db.models.fields.BooleanField')(default=True)),
('sent_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('send_to', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('send_to_method', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('render_email_method', self.gf('django.db.models.fields.TextField')(default='render_email')),
('scheduled_for', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('pigeonpost', ['Pigeon'])
# Adding model 'Outbox'
db.create_table('pigeonpost_outbox', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pigeon', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pigeonpost.Pigeon'], null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('message', self.gf('django.db.models.fields.TextField')()),
('sent_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('succeeded', self.gf('django.db.models.fields.BooleanField')(default=False)),
('failures', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('pigeonpost', ['Outbox'])
# Adding unique constraint on 'Outbox', fields ['pigeon', 'user']
db.create_unique('pigeonpost_outbox', ['pigeon_id', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'Outbox', fields ['pigeon', 'user']
db.delete_unique('pigeonpost_outbox', ['pigeon_id', 'user_id'])
# Deleting model 'Pigeon'
db.delete_table('pigeonpost_pigeon')
# Deleting model 'Outbox'
db.delete_table('pigeonpost_outbox')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pigeonpost.outbox': {
'Meta': {'ordering': "['sent_at']", 'unique_together': "(('pigeon', 'user'),)", 'object_name': 'Outbox'},
'failures': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'pigeon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pigeonpost.Pigeon']", 'null': 'True', 'blank': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'succeeded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'pigeonpost.pigeon': {
'Meta': {'ordering': "['scheduled_for']", 'object_name': 'Pigeon'},
'failures': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'render_email_method': ('django.db.models.fields.TextField', [], {'default': "'render_email'"}),
'scheduled_for': ('django.db.models.fields.DateTimeField', [], {}),
'send_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'send_to_method': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'source_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'successes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'to_send': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
}
}
complete_apps = ['pigeonpost'] | mit |
JFCM121CAKE/android_kernel_samsung_jf | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
karthik-sethuraman/Snowmass-ONFOpenTransport | RI/flask_server/tapi_server/controllers/tapi_path_computation_controller.py | 4 | 74967 | import connexion
import six
from tapi_server.models.inline_object import InlineObject # noqa: E501
from tapi_server.models.inline_object11 import InlineObject11 # noqa: E501
from tapi_server.models.inline_object26 import InlineObject26 # noqa: E501
from tapi_server.models.tapi_common_bandwidth_profile import TapiCommonBandwidthProfile # noqa: E501
from tapi_server.models.tapi_common_capacity import TapiCommonCapacity # noqa: E501
from tapi_server.models.tapi_common_capacity_value import TapiCommonCapacityValue # noqa: E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: E501
from tapi_server.models.tapi_common_service_interface_point_ref import TapiCommonServiceInterfacePointRef # noqa: E501
from tapi_server.models.tapi_path_computation_compute_p2_p_path import TapiPathComputationComputeP2PPath # noqa: E501
from tapi_server.models.tapi_path_computation_delete_p2_p_path import TapiPathComputationDeleteP2PPath # noqa: E501
from tapi_server.models.tapi_path_computation_optimize_p2_p_path import TapiPathComputationOptimizeP2PPath # noqa: E501
from tapi_server.models.tapi_path_computation_path import TapiPathComputationPath # noqa: E501
from tapi_server.models.tapi_path_computation_path_computation_context import TapiPathComputationPathComputationContext # noqa: E501
from tapi_server.models.tapi_path_computation_path_computation_service import TapiPathComputationPathComputationService # noqa: E501
from tapi_server.models.tapi_path_computation_path_objective_function import TapiPathComputationPathObjectiveFunction # noqa: E501
from tapi_server.models.tapi_path_computation_path_optimization_constraint import TapiPathComputationPathOptimizationConstraint # noqa: E501
from tapi_server.models.tapi_path_computation_path_ref import TapiPathComputationPathRef # noqa: E501
from tapi_server.models.tapi_path_computation_path_service_end_point import TapiPathComputationPathServiceEndPoint # noqa: E501
from tapi_server.models.tapi_path_computation_routing_constraint import TapiPathComputationRoutingConstraint # noqa: E501
from tapi_server.models.tapi_path_computation_topology_constraint import TapiPathComputationTopologyConstraint # noqa: E501
from tapi_server.models.tapi_topology_cost_characteristic import TapiTopologyCostCharacteristic # noqa: E501
from tapi_server.models.tapi_topology_latency_characteristic import TapiTopologyLatencyCharacteristic # noqa: E501
from tapi_server.models.tapi_topology_link_ref import TapiTopologyLinkRef # noqa: E501
from tapi_server.models.tapi_topology_node_ref import TapiTopologyNodeRef # noqa: E501
from tapi_server.models.tapi_topology_risk_characteristic import TapiTopologyRiskCharacteristic # noqa: E501
from tapi_server.models.tapi_topology_topology_ref import TapiTopologyTopologyRef # noqa: E501
from tapi_server import util
def data_context_path_computation_context_delete(): # noqa: E501
"""data_context_path_computation_context_delete
removes tapi.path.computation.PathComputationContext # noqa: E501
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_get(): # noqa: E501
"""data_context_path_computation_context_get
returns tapi.path.computation.PathComputationContext # noqa: E501
:rtype: TapiPathComputationPathComputationContext
"""
return 'do some magic!'
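# --- Illustrative sketch (not generated code) ---------------------------------
# Every generated handler in this module currently answers 'do some magic!'.
# A minimal way to back the GET above with data is a module-level context
# object.  The name `_example_path_computation_context` and the helper below
# are assumptions introduced for illustration only (they are not wired to any
# operationId), and they assume the generated model exposes the usual no-arg
# constructor and to_dict() serializer.
_example_path_computation_context = TapiPathComputationPathComputationContext()


def _example_get_path_computation_context():
    """Sketch: serialize the in-memory context as a connexion-style response."""
    return _example_path_computation_context.to_dict(), 200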
def data_context_path_computation_context_path_comp_service_post(tapi_path_computation_path_computation_service=None): # noqa: E501
"""data_context_path_computation_context_path_comp_service_post
creates tapi.path.computation.PathComputationService # noqa: E501
:param tapi_path_computation_path_computation_service: tapi.path.computation.PathComputationService to be added to list
:type tapi_path_computation_path_computation_service: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_computation_service = TapiPathComputationPathComputationService.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
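# --- Illustrative sketch (not generated code) ---------------------------------
# One way to make the POST above persist its payload is an in-memory list.
# `_example_services` and the helper name are assumptions made for this sketch
# only; a real implementation would typically also answer 201 Created.
_example_services = []


def _example_add_path_computation_service(service):
    """Sketch: keep the parsed PathComputationService instead of discarding it."""
    _example_services.append(service)
    return None, 201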
def data_context_path_computation_context_path_comp_serviceuuid_delete(uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_delete
removes tapi.path.computation.PathComputationService # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:rtype: None
"""
return 'do some magic!'
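# --- Illustrative sketch (not generated code) ---------------------------------
# A DELETE keyed by uuid usually removes the entry when present and reports
# 404 otherwise.  `_example_services_by_uuid` is a hypothetical in-memory
# store shared by the sketches in this file, not something the generated code
# defines.
_example_services_by_uuid = {}


def _example_delete_path_computation_service(uuid):
    """Sketch: drop the service stored under `uuid`, or report that it is absent."""
    if _example_services_by_uuid.pop(uuid, None) is None:
        return {'detail': 'path-comp-service not found', 'status': 404}, 404
    return None, 204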
def data_context_path_computation_context_path_comp_serviceuuid_end_point_post(uuid, tapi_path_computation_path_service_end_point=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_point_post
creates tapi.path.computation.PathServiceEndPoint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_path_service_end_point: tapi.path.computation.PathServiceEndPoint to be added to list
:type tapi_path_computation_path_service_end_point: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_service_end_point = TapiPathComputationPathServiceEndPoint.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_delete(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_delete
removes tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_get(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_get
returns tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: TapiCommonCapacityValue
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_post(uuid, local_id, tapi_common_capacity_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_post
creates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added to list
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_put(uuid, local_id, tapi_common_capacity_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_put
creates or updates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added or updated
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_information_rate_delete(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_information_rate_delete
removes tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_information_rate_get(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_information_rate_get
returns tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: TapiCommonCapacityValue
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_information_rate_post(uuid, local_id, tapi_common_capacity_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_information_rate_post
creates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added to list
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_information_rate_put(uuid, local_id, tapi_common_capacity_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_information_rate_put
creates or updates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added or updated
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_delete(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_delete
removes tapi.common.BandwidthProfile # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_get(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_get
returns tapi.common.BandwidthProfile # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: TapiCommonBandwidthProfile
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_delete(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_delete
removes tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_get(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_get
returns tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: TapiCommonCapacityValue
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_post(uuid, local_id, tapi_common_capacity_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_post
creates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added to list
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_put(uuid, local_id, tapi_common_capacity_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_put
creates or updates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added or updated
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_delete(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_delete
removes tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_get(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_get
returns tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: TapiCommonCapacityValue
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_post(uuid, local_id, tapi_common_capacity_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_post
creates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added to list
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_put(uuid, local_id, tapi_common_capacity_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_put
creates or updates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added or updated
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_post(uuid, local_id, tapi_common_bandwidth_profile=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_post
creates tapi.common.BandwidthProfile # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_bandwidth_profile: tapi.common.BandwidthProfile to be added to list
:type tapi_common_bandwidth_profile: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_bandwidth_profile = TapiCommonBandwidthProfile.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_put(uuid, local_id, tapi_common_bandwidth_profile=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_put
creates or updates tapi.common.BandwidthProfile # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_bandwidth_profile: tapi.common.BandwidthProfile to be added or updated
:type tapi_common_bandwidth_profile: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_bandwidth_profile = TapiCommonBandwidthProfile.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_delete(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_delete
removes tapi.common.Capacity # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_get(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_get
returns tapi.common.Capacity # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: TapiCommonCapacity
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_post(uuid, local_id, tapi_common_capacity=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_post
creates tapi.common.Capacity # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity: tapi.common.Capacity to be added to list
:type tapi_common_capacity: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity = TapiCommonCapacity.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_put(uuid, local_id, tapi_common_capacity=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_put
creates or updates tapi.common.Capacity # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity: tapi.common.Capacity to be added or updated
:type tapi_common_capacity: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity = TapiCommonCapacity.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_total_size_delete(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_total_size_delete
removes tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_total_size_get(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_total_size_get
returns tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: TapiCommonCapacityValue
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_total_size_post(uuid, local_id, tapi_common_capacity_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_total_size_post
creates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added to list
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_total_size_put(uuid, local_id, tapi_common_capacity_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_total_size_put
creates or updates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added or updated
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_delete(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_delete
removes tapi.path.computation.PathServiceEndPoint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_get(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_get
returns tapi.path.computation.PathServiceEndPoint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: TapiPathComputationPathServiceEndPoint
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_name_post(uuid, local_id, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_name_post
creates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_namevalue_name_delete(uuid, local_id, value_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_namevalue_name_delete
removes tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param value_name: Id of name
:type value_name: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_namevalue_name_get(uuid, local_id, value_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_namevalue_name_get
returns tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param value_name: Id of name
:type value_name: str
:rtype: TapiCommonNameAndValue
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_namevalue_name_post(uuid, local_id, value_name, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_namevalue_name_post
creates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param value_name: Id of name
:type value_name: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_namevalue_name_put(uuid, local_id, value_name, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_namevalue_name_put
creates or updates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param value_name: Id of name
:type value_name: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added or updated
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_post(uuid, local_id, tapi_path_computation_path_service_end_point=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_post
creates tapi.path.computation.PathServiceEndPoint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_path_computation_path_service_end_point: tapi.path.computation.PathServiceEndPoint to be added to list
:type tapi_path_computation_path_service_end_point: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_service_end_point = TapiPathComputationPathServiceEndPoint.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_put(uuid, local_id, tapi_path_computation_path_service_end_point=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_put
creates or updates tapi.path.computation.PathServiceEndPoint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_path_computation_path_service_end_point: tapi.path.computation.PathServiceEndPoint to be added or updated
:type tapi_path_computation_path_service_end_point: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_service_end_point = TapiPathComputationPathServiceEndPoint.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_service_interface_point_get(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_service_interface_point_get
returns tapi.common.ServiceInterfacePointRef # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: TapiCommonServiceInterfacePointRef
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_get(uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_get
returns tapi.path.computation.PathComputationService # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:rtype: TapiPathComputationPathComputationService
"""
return 'do some magic!'
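# --- Illustrative sketch (not generated code) ---------------------------------
# The by-uuid GET above would typically look the service up in whatever store
# backs the context; `_example_services_by_uuid` is the hypothetical in-memory
# dict introduced with the earlier sketches.
def _example_get_path_computation_service(uuid):
    """Sketch: return the stored service for `uuid`, or a 404-style error body."""
    service = _example_services_by_uuid.get(uuid)
    if service is None:
        return {'detail': 'path-comp-service not found', 'status': 404}, 404
    return service.to_dict(), 200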
def data_context_path_computation_context_path_comp_serviceuuid_name_post(uuid, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_name_post
creates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_namevalue_name_delete(uuid, value_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_namevalue_name_delete
removes tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_namevalue_name_get(uuid, value_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_namevalue_name_get
returns tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:rtype: TapiCommonNameAndValue
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_namevalue_name_post(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_namevalue_name_post
creates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_namevalue_name_put(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_namevalue_name_put
creates or updates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added or updated
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_objective_function_delete(uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_objective_function_delete
removes tapi.path.computation.PathObjectiveFunction # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_objective_function_get(uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_objective_function_get
returns tapi.path.computation.PathObjectiveFunction # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:rtype: TapiPathComputationPathObjectiveFunction
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_objective_function_name_post(uuid, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_objective_function_name_post
creates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_objective_function_namevalue_name_delete(uuid, value_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_objective_function_namevalue_name_delete
removes tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_objective_function_namevalue_name_get(uuid, value_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_objective_function_namevalue_name_get
returns tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:rtype: TapiCommonNameAndValue
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_objective_function_namevalue_name_post(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_objective_function_namevalue_name_post
creates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_objective_function_namevalue_name_put(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_objective_function_namevalue_name_put
creates or updates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added or updated
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_objective_function_post(uuid, tapi_path_computation_path_objective_function=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_objective_function_post
creates tapi.path.computation.PathObjectiveFunction # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_path_objective_function: tapi.path.computation.PathObjectiveFunction to be added to list
:type tapi_path_computation_path_objective_function: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_objective_function = TapiPathComputationPathObjectiveFunction.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_objective_function_put(uuid, tapi_path_computation_path_objective_function=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_objective_function_put
creates or updates tapi.path.computation.PathObjectiveFunction # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_path_objective_function: tapi.path.computation.PathObjectiveFunction to be added or updated
:type tapi_path_computation_path_objective_function: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_objective_function = TapiPathComputationPathObjectiveFunction.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_delete(uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_delete
removes tapi.path.computation.PathOptimizationConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_get(uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_get
returns tapi.path.computation.PathOptimizationConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:rtype: TapiPathComputationPathOptimizationConstraint
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_name_post(uuid, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_name_post
creates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_namevalue_name_delete(uuid, value_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_namevalue_name_delete
removes tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_namevalue_name_get(uuid, value_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_namevalue_name_get
returns tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:rtype: TapiCommonNameAndValue
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_namevalue_name_post(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_namevalue_name_post
creates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_namevalue_name_put(uuid, value_name, tapi_common_name_and_value=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_namevalue_name_put
creates or updates tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param value_name: Id of name
:type value_name: str
:param tapi_common_name_and_value: tapi.common.NameAndValue to be added or updated
:type tapi_common_name_and_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_post(uuid, tapi_path_computation_path_optimization_constraint=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_post
creates tapi.path.computation.PathOptimizationConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_path_optimization_constraint: tapi.path.computation.PathOptimizationConstraint to be added to list
:type tapi_path_computation_path_optimization_constraint: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_optimization_constraint = TapiPathComputationPathOptimizationConstraint.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_put(uuid, tapi_path_computation_path_optimization_constraint=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_optimization_constraint_put
creates or updates tapi.path.computation.PathOptimizationConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_path_optimization_constraint: tapi.path.computation.PathOptimizationConstraint to be added or updated
:type tapi_path_computation_path_optimization_constraint: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_optimization_constraint = TapiPathComputationPathOptimizationConstraint.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_pathpath_uuid_get(uuid, path_uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_pathpath_uuid_get
returns tapi.path.computation.PathRef # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param path_uuid: Id of path
:type path_uuid: str
:rtype: TapiPathComputationPathRef
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_post(uuid, tapi_path_computation_path_computation_service=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_post
creates tapi.path.computation.PathComputationService # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_path_computation_service: tapi.path.computation.PathComputationService to be added to list
:type tapi_path_computation_path_computation_service: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_computation_service = TapiPathComputationPathComputationService.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_put(uuid, tapi_path_computation_path_computation_service=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_put
creates or updates tapi.path.computation.PathComputationService # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_path_computation_service: tapi.path.computation.PathComputationService to be added or updated
:type tapi_path_computation_path_computation_service: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_computation_service = TapiPathComputationPathComputationService.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
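# --- Illustrative sketch (not generated code) ---------------------------------
# PUT is create-or-replace: store the parsed service under its uuid whether or
# not an entry already exists.  As above, `_example_services_by_uuid` is a
# hypothetical store used only by these sketches.
def _example_upsert_path_computation_service(uuid, service):
    """Sketch: create or replace the service stored under `uuid`."""
    created = uuid not in _example_services_by_uuid
    _example_services_by_uuid[uuid] = service
    return None, (201 if created else 204)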
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_cost_characteristic_post(uuid, tapi_topology_cost_characteristic=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_cost_characteristic_post
creates tapi.topology.CostCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_topology_cost_characteristic: tapi.topology.CostCharacteristic to be added to list
:type tapi_topology_cost_characteristic: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_topology_cost_characteristic = TapiTopologyCostCharacteristic.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_cost_characteristiccost_name_delete(uuid, cost_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_cost_characteristiccost_name_delete
removes tapi.topology.CostCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param cost_name: Id of cost-characteristic
:type cost_name: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_cost_characteristiccost_name_get(uuid, cost_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_cost_characteristiccost_name_get
returns tapi.topology.CostCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param cost_name: Id of cost-characteristic
:type cost_name: str
:rtype: TapiTopologyCostCharacteristic
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_cost_characteristiccost_name_post(uuid, cost_name, tapi_topology_cost_characteristic=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_cost_characteristiccost_name_post
creates tapi.topology.CostCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param cost_name: Id of cost-characteristic
:type cost_name: str
:param tapi_topology_cost_characteristic: tapi.topology.CostCharacteristic to be added to list
:type tapi_topology_cost_characteristic: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_topology_cost_characteristic = TapiTopologyCostCharacteristic.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_cost_characteristiccost_name_put(uuid, cost_name, tapi_topology_cost_characteristic=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_cost_characteristiccost_name_put
creates or updates tapi.topology.CostCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param cost_name: Id of cost-characteristic
:type cost_name: str
:param tapi_topology_cost_characteristic: tapi.topology.CostCharacteristic to be added or updated
:type tapi_topology_cost_characteristic: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_topology_cost_characteristic = TapiTopologyCostCharacteristic.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_delete(uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_delete
removes tapi.path.computation.RoutingConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_get(uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_get
returns tapi.path.computation.RoutingConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:rtype: TapiPathComputationRoutingConstraint
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_latency_characteristic_post(uuid, tapi_topology_latency_characteristic=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_latency_characteristic_post
creates tapi.topology.LatencyCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_topology_latency_characteristic: tapi.topology.LatencyCharacteristic to be added to list
:type tapi_topology_latency_characteristic: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_topology_latency_characteristic = TapiTopologyLatencyCharacteristic.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_latency_characteristictraffic_property_name_delete(uuid, traffic_property_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_latency_characteristictraffic_property_name_delete
removes tapi.topology.LatencyCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param traffic_property_name: Id of latency-characteristic
:type traffic_property_name: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_latency_characteristictraffic_property_name_get(uuid, traffic_property_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_latency_characteristictraffic_property_name_get
returns tapi.topology.LatencyCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param traffic_property_name: Id of latency-characteristic
:type traffic_property_name: str
:rtype: TapiTopologyLatencyCharacteristic
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_latency_characteristictraffic_property_name_post(uuid, traffic_property_name, tapi_topology_latency_characteristic=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_latency_characteristictraffic_property_name_post
creates tapi.topology.LatencyCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param traffic_property_name: Id of latency-characteristic
:type traffic_property_name: str
:param tapi_topology_latency_characteristic: tapi.topology.LatencyCharacteristic to be added to list
:type tapi_topology_latency_characteristic: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_topology_latency_characteristic = TapiTopologyLatencyCharacteristic.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_latency_characteristictraffic_property_name_put(uuid, traffic_property_name, tapi_topology_latency_characteristic=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_latency_characteristictraffic_property_name_put
creates or updates tapi.topology.LatencyCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param traffic_property_name: Id of latency-characteristic
:type traffic_property_name: str
:param tapi_topology_latency_characteristic: tapi.topology.LatencyCharacteristic to be added or updated
:type tapi_topology_latency_characteristic: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_topology_latency_characteristic = TapiTopologyLatencyCharacteristic.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_post(uuid, tapi_path_computation_routing_constraint=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_post
creates tapi.path.computation.RoutingConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_routing_constraint: tapi.path.computation.RoutingConstraint to be added to list
:type tapi_path_computation_routing_constraint: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_routing_constraint = TapiPathComputationRoutingConstraint.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_put(uuid, tapi_path_computation_routing_constraint=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_put
creates or updates tapi.path.computation.RoutingConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_routing_constraint: tapi.path.computation.RoutingConstraint to be added or updated
:type tapi_path_computation_routing_constraint: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_routing_constraint = TapiPathComputationRoutingConstraint.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_risk_diversity_characteristic_post(uuid, tapi_topology_risk_characteristic=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_risk_diversity_characteristic_post
creates tapi.topology.RiskCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_topology_risk_characteristic: tapi.topology.RiskCharacteristic to be added to list
:type tapi_topology_risk_characteristic: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_topology_risk_characteristic = TapiTopologyRiskCharacteristic.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_risk_diversity_characteristicrisk_characteristic_name_delete(uuid, risk_characteristic_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_risk_diversity_characteristicrisk_characteristic_name_delete
removes tapi.topology.RiskCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param risk_characteristic_name: Id of risk-diversity-characteristic
:type risk_characteristic_name: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_risk_diversity_characteristicrisk_characteristic_name_get(uuid, risk_characteristic_name): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_risk_diversity_characteristicrisk_characteristic_name_get
returns tapi.topology.RiskCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param risk_characteristic_name: Id of risk-diversity-characteristic
:type risk_characteristic_name: str
:rtype: TapiTopologyRiskCharacteristic
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_risk_diversity_characteristicrisk_characteristic_name_post(uuid, risk_characteristic_name, tapi_topology_risk_characteristic=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_risk_diversity_characteristicrisk_characteristic_name_post
creates tapi.topology.RiskCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param risk_characteristic_name: Id of risk-diversity-characteristic
:type risk_characteristic_name: str
:param tapi_topology_risk_characteristic: tapi.topology.RiskCharacteristic to be added to list
:type tapi_topology_risk_characteristic: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_topology_risk_characteristic = TapiTopologyRiskCharacteristic.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_risk_diversity_characteristicrisk_characteristic_name_put(uuid, risk_characteristic_name, tapi_topology_risk_characteristic=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_risk_diversity_characteristicrisk_characteristic_name_put
creates or updates tapi.topology.RiskCharacteristic # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param risk_characteristic_name: Id of risk-diversity-characteristic
:type risk_characteristic_name: str
:param tapi_topology_risk_characteristic: tapi.topology.RiskCharacteristic to be added or updated
:type tapi_topology_risk_characteristic: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_topology_risk_characteristic = TapiTopologyRiskCharacteristic.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_avoid_topologytopology_uuid_get(uuid, topology_uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_avoid_topologytopology_uuid_get
returns tapi.topology.TopologyRef # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param topology_uuid: Id of avoid-topology
:type topology_uuid: str
:rtype: TapiTopologyTopologyRef
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_delete(uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_delete
removes tapi.path.computation.TopologyConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:rtype: None
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_exclude_linktopology_uuidlink_uuid_get(uuid, topology_uuid, link_uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_exclude_linktopology_uuidlink_uuid_get
returns tapi.topology.LinkRef # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param topology_uuid: Id of exclude-link
:type topology_uuid: str
:param link_uuid: Id of exclude-link
:type link_uuid: str
:rtype: TapiTopologyLinkRef
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_exclude_nodetopology_uuidnode_uuid_get(uuid, topology_uuid, node_uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_exclude_nodetopology_uuidnode_uuid_get
returns tapi.topology.NodeRef # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param topology_uuid: Id of exclude-node
:type topology_uuid: str
:param node_uuid: Id of exclude-node
:type node_uuid: str
:rtype: TapiTopologyNodeRef
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_exclude_pathpath_uuid_get(uuid, path_uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_exclude_pathpath_uuid_get
returns tapi.path.computation.PathRef # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param path_uuid: Id of exclude-path
:type path_uuid: str
:rtype: TapiPathComputationPathRef
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_get(uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_get
returns tapi.path.computation.TopologyConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:rtype: TapiPathComputationTopologyConstraint
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_include_linktopology_uuidlink_uuid_get(uuid, topology_uuid, link_uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_include_linktopology_uuidlink_uuid_get
returns tapi.topology.LinkRef # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param topology_uuid: Id of include-link
:type topology_uuid: str
:param link_uuid: Id of include-link
:type link_uuid: str
:rtype: TapiTopologyLinkRef
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_include_nodetopology_uuidnode_uuid_get(uuid, topology_uuid, node_uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_include_nodetopology_uuidnode_uuid_get
returns tapi.topology.NodeRef # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param topology_uuid: Id of include-node
:type topology_uuid: str
:param node_uuid: Id of include-node
:type node_uuid: str
:rtype: TapiTopologyNodeRef
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_include_pathpath_uuid_get(uuid, path_uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_include_pathpath_uuid_get
returns tapi.path.computation.PathRef # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param path_uuid: Id of include-path
:type path_uuid: str
:rtype: TapiPathComputationPathRef
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_include_topologytopology_uuid_get(uuid, topology_uuid): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_include_topologytopology_uuid_get
returns tapi.topology.TopologyRef # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param topology_uuid: Id of include-topology
:type topology_uuid: str
:rtype: TapiTopologyTopologyRef
"""
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_post(uuid, tapi_path_computation_topology_constraint=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_post
creates tapi.path.computation.TopologyConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_topology_constraint: tapi.path.computation.TopologyConstraint to be added to list
:type tapi_path_computation_topology_constraint: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_topology_constraint = TapiPathComputationTopologyConstraint.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_put(uuid, tapi_path_computation_topology_constraint=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_topology_constraint_put
creates or updates tapi.path.computation.TopologyConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_topology_constraint: tapi.path.computation.TopologyConstraint to be added or updated
:type tapi_path_computation_topology_constraint: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_topology_constraint = TapiPathComputationTopologyConstraint.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_pathuuid_get(uuid): # noqa: E501
"""data_context_path_computation_context_pathuuid_get
returns tapi.path.computation.Path # noqa: E501
:param uuid: Id of path
:type uuid: str
:rtype: TapiPathComputationPath
"""
return 'do some magic!'
def data_context_path_computation_context_pathuuid_linktopology_uuidlink_uuid_get(uuid, topology_uuid, link_uuid): # noqa: E501
"""data_context_path_computation_context_pathuuid_linktopology_uuidlink_uuid_get
returns tapi.topology.LinkRef # noqa: E501
:param uuid: Id of path
:type uuid: str
:param topology_uuid: Id of link
:type topology_uuid: str
:param link_uuid: Id of link
:type link_uuid: str
:rtype: TapiTopologyLinkRef
"""
return 'do some magic!'
def data_context_path_computation_context_pathuuid_namevalue_name_get(uuid, value_name): # noqa: E501
"""data_context_path_computation_context_pathuuid_namevalue_name_get
returns tapi.common.NameAndValue # noqa: E501
:param uuid: Id of path
:type uuid: str
:param value_name: Id of name
:type value_name: str
:rtype: TapiCommonNameAndValue
"""
return 'do some magic!'
def data_context_path_computation_context_pathuuid_routing_constraint_cost_characteristiccost_name_get(uuid, cost_name): # noqa: E501
"""data_context_path_computation_context_pathuuid_routing_constraint_cost_characteristiccost_name_get
returns tapi.topology.CostCharacteristic # noqa: E501
:param uuid: Id of path
:type uuid: str
:param cost_name: Id of cost-characteristic
:type cost_name: str
:rtype: TapiTopologyCostCharacteristic
"""
return 'do some magic!'
def data_context_path_computation_context_pathuuid_routing_constraint_get(uuid): # noqa: E501
"""data_context_path_computation_context_pathuuid_routing_constraint_get
returns tapi.path.computation.RoutingConstraint # noqa: E501
:param uuid: Id of path
:type uuid: str
:rtype: TapiPathComputationRoutingConstraint
"""
return 'do some magic!'
def data_context_path_computation_context_pathuuid_routing_constraint_latency_characteristictraffic_property_name_get(uuid, traffic_property_name): # noqa: E501
"""data_context_path_computation_context_pathuuid_routing_constraint_latency_characteristictraffic_property_name_get
returns tapi.topology.LatencyCharacteristic # noqa: E501
:param uuid: Id of path
:type uuid: str
:param traffic_property_name: Id of latency-characteristic
:type traffic_property_name: str
:rtype: TapiTopologyLatencyCharacteristic
"""
return 'do some magic!'
def data_context_path_computation_context_pathuuid_routing_constraint_risk_diversity_characteristicrisk_characteristic_name_get(uuid, risk_characteristic_name): # noqa: E501
"""data_context_path_computation_context_pathuuid_routing_constraint_risk_diversity_characteristicrisk_characteristic_name_get
returns tapi.topology.RiskCharacteristic # noqa: E501
:param uuid: Id of path
:type uuid: str
:param risk_characteristic_name: Id of risk-diversity-characteristic
:type risk_characteristic_name: str
:rtype: TapiTopologyRiskCharacteristic
"""
return 'do some magic!'
def data_context_path_computation_context_post(tapi_path_computation_path_computation_context=None): # noqa: E501
"""data_context_path_computation_context_post
creates tapi.path.computation.PathComputationContext # noqa: E501
:param tapi_path_computation_path_computation_context: tapi.path.computation.PathComputationContext to be added to list
:type tapi_path_computation_path_computation_context: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_computation_context = TapiPathComputationPathComputationContext.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def data_context_path_computation_context_put(tapi_path_computation_path_computation_context=None): # noqa: E501
"""data_context_path_computation_context_put
creates or updates tapi.path.computation.PathComputationContext # noqa: E501
:param tapi_path_computation_path_computation_context: tapi.path.computation.PathComputationContext to be added or updated
:type tapi_path_computation_path_computation_context: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_path_computation_context = TapiPathComputationPathComputationContext.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def operations_compute_p_2_p_path_post(inline_object=None): # noqa: E501
"""operations_compute_p2_p_path_post
# noqa: E501
:param inline_object:
:type inline_object: dict | bytes
:rtype: TapiPathComputationComputeP2PPath
"""
if connexion.request.is_json:
inline_object = InlineObject.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def operations_delete_p_2_p_path_post(inline_object11=None): # noqa: E501
"""operations_delete_p2_p_path_post
# noqa: E501
:param inline_object11:
:type inline_object11: dict | bytes
:rtype: TapiPathComputationDeleteP2PPath
"""
if connexion.request.is_json:
inline_object11 = InlineObject11.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def operations_optimize_p_2_p_path_post(inline_object26=None): # noqa: E501
"""operations_optimize_p2_p_path_post
# noqa: E501
:param inline_object26:
:type inline_object26: dict | bytes
:rtype: TapiPathComputationOptimizeP2PPath
"""
if connexion.request.is_json:
inline_object26 = InlineObject26.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
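# --- Editor's illustrative sketch (not generated code) -----------------------
# Every stub above returns the placeholder 'do some magic!'. One hypothetical
# way to complete such a connexion handler is to look the record up in a store
# and return it together with an HTTP status code. `_EXAMPLE_PATH_STORE` and
# `_example_path_get` are invented here purely for illustration; they are not
# part of the generated TAPI controller API.
_EXAMPLE_PATH_STORE = {}
def _example_path_get(uuid):
    """Sketch only: return a stored path dict, or a 404 error body."""
    record = _EXAMPLE_PATH_STORE.get(uuid)
    if record is None:
        return {'message': 'path %s not found' % uuid}, 404
    return record, 200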
| apache-2.0 |
ewdurbin/sentry | src/sentry/models/tagkey.py | 11 | 3121 | """
sentry.models.tagkey
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from sentry.constants import MAX_TAG_KEY_LENGTH, TAG_LABELS
from sentry.db.models import (
Model, BoundedPositiveIntegerField, FlexibleForeignKey, sane_repr
)
from sentry.db.models.manager import BaseManager
from sentry.utils.cache import cache
from sentry.utils.http import absolute_uri
# TODO(dcramer): pull in enum library
class TagKeyStatus(object):
VISIBLE = 0
PENDING_DELETION = 1
DELETION_IN_PROGRESS = 2
class TagKeyManager(BaseManager):
def _get_cache_key(self, project_id):
return 'filterkey:all:%s' % project_id
def all_keys(self, project):
# TODO: cache invalidation via post_save/post_delete signals much like BaseManager
key = self._get_cache_key(project.id)
result = cache.get(key)
if result is None:
result = list(self.filter(
project=project,
status=TagKeyStatus.VISIBLE,
).values_list('key', flat=True))
cache.set(key, result, 60)
return result
class TagKey(Model):
"""
Stores references to available filter keys.
"""
__core__ = False
DEFAULT_URL_NAME = 'sentry-explore-tag'
URL_NAMES = {
'sentry:user': 'sentry-users',
'sentry:filename': 'sentry-explore-code',
'sentry:function': 'sentry-explore-code-by-function',
}
project = FlexibleForeignKey('sentry.Project')
key = models.CharField(max_length=MAX_TAG_KEY_LENGTH)
values_seen = BoundedPositiveIntegerField(default=0)
label = models.CharField(max_length=64, null=True)
status = BoundedPositiveIntegerField(choices=(
(TagKeyStatus.VISIBLE, _('Visible')),
(TagKeyStatus.PENDING_DELETION, _('Pending Deletion')),
(TagKeyStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
), default=TagKeyStatus.VISIBLE)
objects = TagKeyManager()
class Meta:
app_label = 'sentry'
db_table = 'sentry_filterkey'
unique_together = (('project', 'key'),)
__repr__ = sane_repr('project_id', 'key')
def get_label(self):
return self.label \
or TAG_LABELS.get(self.key) \
or self.key.replace('_', ' ').title()
def get_absolute_url(self):
# HACK(dcramer): quick and dirty way to support code/users
try:
url_name = self.URL_NAMES[self.key]
except KeyError:
url_name = self.DEFAULT_URL_NAME
return absolute_uri(reverse(url_name, args=[
self.project.organization.slug, self.project.slug, self.key]))
return absolute_uri(reverse(url_name, args=[
self.project.organization.slug, self.project.slug]))
def get_audit_log_data(self):
return {
'key': self.key,
}
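# Editor's sketch (hedged): the TODO in TagKeyManager.all_keys mentions cache
# invalidation via post_save/post_delete signals. One possible wiring is shown
# below as an illustration of the idea; it is not what Sentry actually ships.
from django.db.models.signals import post_delete, post_save
def _clear_tagkey_cache(sender, instance, **kwargs):
    # Drop the cached list of visible keys for the affected project.
    cache.delete('filterkey:all:%s' % instance.project_id)
post_save.connect(_clear_tagkey_cache, sender=TagKey, weak=False)
post_delete.connect(_clear_tagkey_cache, sender=TagKey, weak=False)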
| bsd-3-clause |
azlanismail/prismgames | examples/games/car/networkx/algorithms/traversal/tests/test_dfs.py | 1 | 2144 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestDFS:
def setUp(self):
# simple graph
G=nx.Graph()
G.add_edges_from([(0,1),(1,2),(1,3),(2,4),(3,4)])
self.G=G
# simple graph, disconnected
D=nx.Graph()
D.add_edges_from([(0,1),(2,3)])
self.D=D
def test_preorder_nodes(self):
assert_equal(list(nx.dfs_preorder_nodes(self.G,source=0)),
[0, 1, 2, 4, 3])
assert_equal(list(nx.dfs_preorder_nodes(self.D)),[0, 1, 2, 3])
def test_postorder_nodes(self):
assert_equal(list(nx.dfs_postorder_nodes(self.G,source=0)),
[3, 4, 2, 1, 0])
assert_equal(list(nx.dfs_postorder_nodes(self.D)),[1, 0, 3, 2])
def test_successor(self):
assert_equal(nx.dfs_successors(self.G,source=0),
{0: [1], 1: [2], 2: [4], 4: [3]})
assert_equal(nx.dfs_successors(self.D), {0: [1], 2: [3]})
def test_predecessor(self):
assert_equal(nx.dfs_predecessors(self.G,source=0),
{1: 0, 2: 1, 3: 4, 4: 2})
assert_equal(nx.dfs_predecessors(self.D), {1: 0, 3: 2})
def test_dfs_tree(self):
T=nx.dfs_tree(self.G,source=0)
assert_equal(sorted(T.nodes()),sorted(self.G.nodes()))
assert_equal(sorted(T.edges()),[(0, 1), (1, 2), (2, 4), (4, 3)])
def test_dfs_edges(self):
edges=nx.dfs_edges(self.G,source=0)
assert_equal(list(edges),[(0, 1), (1, 2), (2, 4), (4, 3)])
edges=nx.dfs_edges(self.D)
assert_equal(list(edges),[(0, 1), (2, 3)])
def test_dfs_labeled_edges(self):
edges=list(nx.dfs_labeled_edges(self.G,source=0))
forward=[(u,v) for (u,v,d) in edges if d['dir']=='forward']
assert_equal(forward,[(0,0), (0, 1), (1, 2), (2, 4), (4, 3)])
def test_dfs_labeled_disconnected_edges(self):
edges=list(nx.dfs_labeled_edges(self.D))
forward=[(u,v) for (u,v,d) in edges if d['dir']=='forward']
assert_equal(forward,[(0, 0), (0, 1), (2, 2), (2, 3)])
| gpl-2.0 |
elkingtonmcb/PredictionIO | examples/scala-parallel-similarproduct/no-set-user/data/import_eventserver.py | 47 | 1671 | """
Import sample data for similar product engine
"""
import predictionio
import argparse
import random
SEED = 3
def import_events(client):
random.seed(SEED)
count = 0
print client.get_status()
print "Importing data..."
# generate 10 users, with user ids u1,u2,....,u10
user_ids = ["u%s" % i for i in range(1, 11)]
# generate 50 items, with item ids i1,i2,....,i50
# random assign 1 to 4 categories among c1-c6 to items
categories = ["c%s" % i for i in range(1, 7)]
item_ids = ["i%s" % i for i in range(1, 51)]
for item_id in item_ids:
print "Set item", item_id
client.create_event(
event="$set",
entity_type="item",
entity_id=item_id,
properties={
"categories" : random.sample(categories, random.randint(1, 4))
}
)
count += 1
# each user randomly viewed 10 items
for user_id in user_ids:
for viewed_item in random.sample(item_ids, 10):
print "User", user_id ,"views item", viewed_item
client.create_event(
event="view",
entity_type="user",
entity_id=user_id,
target_entity_type="item",
target_entity_id=viewed_item
)
count += 1
print "%s events are imported." % count
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Import sample data for similar product engine")
parser.add_argument('--access_key', default='invalid_access_key')
parser.add_argument('--url', default="http://localhost:7070")
args = parser.parse_args()
print args
client = predictionio.EventClient(
access_key=args.access_key,
url=args.url,
threads=5,
qsize=500)
import_events(client)
| apache-2.0 |
trishnaguha/ansible | lib/ansible/modules/monitoring/zabbix/zabbix_host_facts.py | 13 | 7406 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) [email protected]
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
RETURN = '''
---
hosts:
description: List of Zabbix hosts. See https://www.zabbix.com/documentation/3.4/manual/api/reference/host/get for list of host values.
returned: success
type: list
sample: [ { "available": "1", "description": "", "disable_until": "0", "error": "", "flags": "0", "groups": ["1"], "host": "Host A", ... } ]
'''
DOCUMENTATION = '''
---
module: zabbix_host_facts
short_description: Gather facts about Zabbix host
description:
- This module allows you to search for Zabbix host entries.
version_added: "2.7"
author:
- "Michael Miko (@RedWhiteMiko)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
host_name:
description:
- Name of the host in Zabbix.
- host_name is the unique identifier used and cannot be updated using this module.
required: true
host_ip:
description:
- Host interface IP of the host in Zabbix.
required: false
exact_match:
description:
- Find the exact match
type: bool
default: no
remove_duplicate:
description:
- Remove duplicate host from host result
type: bool
default: yes
extends_documentation_fragment:
- zabbix
'''
EXAMPLES = '''
- name: Get host info
local_action:
module: zabbix_host_facts
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
host_ip: 127.0.0.1
timeout: 10
exact_match: no
remove_duplicate: yes
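# A second, hypothetical example (editor's addition): look hosts up by their
# interface IPs and keep the result for later tasks. Values are illustrative.
- name: Get host info by IP
  local_action:
    module: zabbix_host_facts
    server_url: http://monitor.example.com
    login_user: username
    login_password: password
    host_ip:
      - 127.0.0.1
    timeout: 10
  register: zabbix_hosts_by_ip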
'''
from ansible.module_utils.basic import AnsibleModule
try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
# Extend the ZabbixAPI
# Since the zabbix-api python module is too old (version 1.0, no higher version so far),
# it does not support the 'hostinterface' api calls,
# so we have to inherit the ZabbixAPI class to add 'hostinterface' support.
class ZabbixAPIExtends(ZabbixAPI):
hostinterface = None
def __init__(self, server, timeout, user, passwd, validate_certs, **kwargs):
ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd, validate_certs=validate_certs)
self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
class Host(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
def get_hosts_by_host_name(self, host_name, exact_match):
""" Get host by host name """
search_key = 'search'
if exact_match:
search_key = 'filter'
host_list = self._zapi.host.get({'output': 'extend', 'selectParentTemplates': ['name'], search_key: {'host': [host_name]}})
if len(host_list) < 1:
self._module.fail_json(msg="Host not found: %s" % host_name)
else:
return host_list
def get_hosts_by_ip(self, host_ips):
""" Get host by host ip(s) """
hostinterfaces = self._zapi.hostinterface.get({
'output': 'extend',
'filter': {
'ip': host_ips
}
})
if len(hostinterfaces) < 1:
self._module.fail_json(msg="Host not found: %s" % host_ips)
host_list = []
for hostinterface in hostinterfaces:
host = self._zapi.host.get({
'output': 'extend',
'selectGroups': 'extend',
'selectParentTemplates': ['name'],
'hostids': hostinterface['hostid']
})
host[0]['hostinterfaces'] = hostinterface
host_list.append(host[0])
return host_list
def delete_duplicate_hosts(self, hosts):
""" Delete duplicated hosts """
unique_hosts = []
listed_hostnames = []
for zabbix_host in hosts:
if zabbix_host['name'] in listed_hostnames:
self._zapi.host.delete([zabbix_host['hostid']])
continue
unique_hosts.append(zabbix_host)
listed_hostnames.append(zabbix_host['name'])
return unique_hosts
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
host_name=dict(type='str', default='', required=False),
host_ip=dict(type='list', default=[], required=False),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False, default=None, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
timeout=dict(type='int', default=10),
exact_match=dict(type='bool', required=False, default=False),
remove_duplicate=dict(type='bool', required=False, default=True)
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
validate_certs = module.params['validate_certs']
host_name = module.params['host_name']
host_ips = module.params['host_ip']
timeout = module.params['timeout']
exact_match = module.params['exact_match']
is_remove_duplicate = module.params['remove_duplicate']
zbx = None
# login to zabbix
try:
zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
validate_certs=validate_certs)
zbx.login(login_user, login_password)
except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
host = Host(module, zbx)
if host_name:
hosts = host.get_hosts_by_host_name(host_name, exact_match)
if is_remove_duplicate:
hosts = host.delete_duplicate_hosts(hosts)
extended_hosts = []
for zabbix_host in hosts:
zabbix_host['hostinterfaces'] = host._zapi.hostinterface.get({
'output': 'extend', 'hostids': zabbix_host['hostid']
})
extended_hosts.append(zabbix_host)
module.exit_json(ok=True, hosts=extended_hosts)
elif host_ips:
extended_hosts = host.get_hosts_by_ip(host_ips)
if is_remove_duplicate:
hosts = host.delete_duplicate_hosts(extended_hosts)
module.exit_json(ok=True, hosts=extended_hosts)
else:
module.exit_json(ok=False, hosts=[], result="No Host present")
if __name__ == '__main__':
main()
| gpl-3.0 |
znoland3/zachdemo | venvdir/lib/python3.4/site-packages/dominate/document.py | 37 | 2217 | __license__ = '''
This file is part of Dominate.
Dominate is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Dominate is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General
Public License along with Dominate. If not, see
<http://www.gnu.org/licenses/>.
'''
from . import tags
try:
basestring = basestring
except NameError: # py3
basestring = str
unicode = str
class document(tags.html):
tagname = 'html'
def __init__(self, title='Dominate', doctype='<!DOCTYPE html>', request=None):
'''
Creates a new document instance. Accepts `title`, `doctype`, and `request` keyword arguments.
'''
super(document, self).__init__()
self.doctype = doctype
self.head = super(document, self).add(tags.head())
self.body = super(document, self).add(tags.body())
self.title_node = self.head.add(tags.title(title))
self._entry = self.body
def get_title(self):
return self.title_node.text
def set_title(self, title):
if isinstance(title, basestring):
self.title_node.text = title
else:
self.head.remove(self.title_node)
self.head.add(title)
self.title_node = title
title = property(get_title, set_title)
def add(self, *args):
'''
Adding tags to a document appends them to the <body>.
'''
return self._entry.add(*args)
def render(self, *args, **kwargs):
'''
Creates a <title> tag if not present and renders the DOCTYPE and tag tree.
'''
r = []
#Validates the tag tree and adds the doctype if one was set
if self.doctype:
r.append(self.doctype)
r.append('\n')
r.append(super(document, self).render(*args, **kwargs))
return u''.join(r)
__str__ = __unicode__ = render
def __repr__(self):
return '<dominate.document "%s">' % self.title
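# Editor's usage sketch (hedged): a minimal way to build and render a page with
# the class above, using standard dominate tags.
#
#     import dominate
#     from dominate import tags
#     doc = dominate.document(title='Hello')
#     with doc:
#         tags.h1('Hello, World!')
#         tags.p('Rendered with dominate.')
#     print(doc.render())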
| mit |
40223249-1/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/token.py | 743 | 3034 | """Token constants (from "token.h")."""
__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF']
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# ./python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
LBRACE = 25
RBRACE = 26
EQEQUAL = 27
NOTEQUAL = 28
LESSEQUAL = 29
GREATEREQUAL = 30
TILDE = 31
CIRCUMFLEX = 32
LEFTSHIFT = 33
RIGHTSHIFT = 34
DOUBLESTAR = 35
PLUSEQUAL = 36
MINEQUAL = 37
STAREQUAL = 38
SLASHEQUAL = 39
PERCENTEQUAL = 40
AMPEREQUAL = 41
VBAREQUAL = 42
CIRCUMFLEXEQUAL = 43
LEFTSHIFTEQUAL = 44
RIGHTSHIFTEQUAL = 45
DOUBLESTAREQUAL = 46
DOUBLESLASH = 47
DOUBLESLASHEQUAL = 48
AT = 49
RARROW = 50
ELLIPSIS = 51
OP = 52
ERRORTOKEN = 53
N_TOKENS = 54
NT_OFFSET = 256
#--end constants--
tok_name = {value: name
for name, value in globals().items()
if isinstance(value, int) and not name.startswith('_')}
__all__.extend(tok_name.values())
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
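# Editor's illustration (hedged): terminal token ids sit below NT_OFFSET, so
# the helpers above and tok_name relate as demonstrated by this small, unused
# function.
def _demo_token_helpers():
    assert ISTERMINAL(NAME) and not ISNONTERMINAL(NAME)
    assert ISEOF(ENDMARKER)
    return tok_name[NAME]  # -> 'NAME'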
def _main():
import re
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError as err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = fp.read().split("\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = sorted(tokens.keys())
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = fp.read().split("\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write("\n".join(format))
fp.close()
if __name__ == "__main__":
_main()
| gpl-3.0 |
juanalfonsopr/odoo | addons/report_webkit/convert.py | 322 | 2581 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.tools import convert
original_xml_import = convert.xml_import
class WebkitXMLImport(original_xml_import):
# Override of xml import in order to add webkit_header tag in report tag.
# As discussed with the R&D Team, the current XML processing API does
# not offer enough flexibility to do it in a cleaner way.
# The solution is not meant to be a long-term solution, but at least
# allows chaining of several overrides of the _tag_report method,
# and does not require a copy/paste of the original code.
def _tag_report(self, cr, rec, data_node=None, mode=None):
report_id = super(WebkitXMLImport, self)._tag_report(cr, rec, data_node)
if rec.get('report_type') == 'webkit':
header = rec.get('webkit_header')
if header:
if header in ('False', '0', 'None'):
webkit_header_id = False
else:
webkit_header_id = self.id_get(cr, header)
self.pool.get('ir.actions.report.xml').write(cr, self.uid,
report_id, {'webkit_header': webkit_header_id})
return report_id
convert.xml_import = WebkitXMLImport
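# Editor's note (illustrative only): with this override installed, a report
# declaration in an XML data file may carry a webkit_header attribute, e.g.
# <report id="report_example" model="res.partner" name="example.report"
# report_type="webkit" webkit_header="report_webkit.header_html"/>
# The attribute value is resolved via id_get() unless it is 'False', '0' or
# 'None', in which case no header is linked. The ids shown here are made up.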
| agpl-3.0 |
syphar/django | tests/template_tests/filter_tests/test_stringformat.py | 73 | 1191 | from django.template.defaultfilters import stringformat
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class StringformatTests(SimpleTestCase):
"""
Notice that escaping is applied *after* any filters, so the string
formatting here only needs to deal with pre-escaped characters.
"""
@setup({
'stringformat01':
'{% autoescape off %}.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.{% endautoescape %}'
})
def test_stringformat01(self):
output = self.engine.render_to_string('stringformat01', {'a': 'a<b', 'b': mark_safe('a<b')})
self.assertEqual(output, '. a<b. . a<b.')
@setup({'stringformat02': '.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.'})
def test_stringformat02(self):
output = self.engine.render_to_string('stringformat02', {'a': 'a<b', 'b': mark_safe('a<b')})
self.assertEqual(output, '. a<b. . a<b.')
class FunctionTests(SimpleTestCase):
def test_format(self):
self.assertEqual(stringformat(1, '03d'), '001')
def test_invalid(self):
self.assertEqual(stringformat(1, 'z'), '')
| bsd-3-clause |
MarsSnail/gyp_tools | test/configurations/basics/gyptest-configurations.py | 72 | 1068 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable in three different configurations.
"""
import TestGyp
test = TestGyp.TestGyp()
if test.format == 'android':
# This test currently fails on android. Investigate why, fix the issues
# responsible, and reenable this test on android. See bug:
# https://code.google.com/p/gyp/issues/detail?id=436
test.skip_test(message='Test fails on android. Fix and reenable.\n')
test.run_gyp('configurations.gyp')
test.set_configuration('Release')
test.build('configurations.gyp')
test.run_built_executable('configurations', stdout="Release configuration\n")
test.set_configuration('Debug')
test.build('configurations.gyp')
test.run_built_executable('configurations', stdout="Debug configuration\n")
test.set_configuration('Foo')
test.build('configurations.gyp')
test.run_built_executable('configurations', stdout="Foo configuration\n")
test.pass_test()
| bsd-3-clause |
AnderEnder/ansible-modules-extras | messaging/rabbitmq_user.py | 65 | 10113 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rabbitmq_user
short_description: Adds or removes users to/from RabbitMQ
description:
- Add or remove users to RabbitMQ and assign permissions
version_added: "1.1"
author: '"Chris Hoffman (@chrishoffman)"'
options:
user:
description:
- Name of user to add
required: true
default: null
aliases: [username, name]
password:
description:
- Password of user to add.
- To change the password of an existing user, you must also specify
C(force=yes).
required: false
default: null
tags:
description:
- User tags specified as comma delimited
required: false
default: null
permissions:
description:
- a list of dicts, each dict contains vhost, configure_priv, write_priv, and read_priv,
and represents a permission rule for that vhost.
- This option is preferable when you care about all of the user's permissions.
- You should use vhost, configure_priv, write_priv, and read_priv options instead
if you care about permissions for just some vhosts.
required: false
default: []
vhost:
description:
- vhost to apply access privileges.
- This option will be ignored when permissions option is used.
required: false
default: /
node:
description:
- erlang node name of the rabbit we wish to configure
required: false
default: rabbit
version_added: "1.2"
configure_priv:
description:
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
- This option will be ignored when permissions option is used.
required: false
default: ^$
write_priv:
description:
- Regular expression to restrict write actions on a resource
for the specified vhost.
- By default all actions are restricted.
- This option will be ignored when permissions option is used.
required: false
default: ^$
read_priv:
description:
- Regular expression to restrict read actions on a resource
for the specified vhost.
- By default all actions are restricted.
- This option will be ignored when permissions option is used.
required: false
default: ^$
force:
description:
- Deletes and recreates the user.
required: false
default: "no"
choices: [ "yes", "no" ]
state:
description:
- Specify if user is to be added or removed
required: false
default: present
choices: [present, absent]
'''
EXAMPLES = '''
# Add user to server and assign full access control on / vhost.
# The user might have permission rules for other vhost but you don't care.
- rabbitmq_user: user=joe
password=changeme
vhost=/
configure_priv=.*
read_priv=.*
write_priv=.*
state=present
# Add user to server and assign full access control on / vhost.
# The user doesn't have permission rules for other vhosts
- rabbitmq_user: user=joe
password=changeme
permissions=[{vhost='/', configure_priv='.*', read_priv='.*', write_priv='.*'}]
state=present
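# The same rule written as an explicit list of dicts for the permissions
# option (editor's addition, illustrative only):
- rabbitmq_user:
    user: joe
    password: changeme
    permissions:
      - vhost: /
        configure_priv: .*
        read_priv: .*
        write_priv: .*
    state: present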
'''
class RabbitMqUser(object):
def __init__(self, module, username, password, tags, permissions,
node, bulk_permissions=False):
self.module = module
self.username = username
self.password = password
self.node = node
if not tags:
self.tags = list()
else:
self.tags = tags.split(',')
self.permissions = permissions
self.bulk_permissions = bulk_permissions
self._tags = None
self._permissions = []
self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
def _exec(self, args, run_in_check_mode=False):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = [self._rabbitmqctl, '-q']
if self.node is not None:
cmd.extend(['-n', self.node])
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def get(self):
users = self._exec(['list_users'], True)
for user_tag in users:
if '\t' not in user_tag:
continue
user, tags = user_tag.split('\t')
if user == self.username:
for c in ['[',']',' ']:
tags = tags.replace(c, '')
if tags != '':
self._tags = tags.split(',')
else:
self._tags = list()
self._permissions = self._get_permissions()
return True
return False
def _get_permissions(self):
perms_out = self._exec(['list_user_permissions', self.username], True)
perms_list = list()
for perm in perms_out:
vhost, configure_priv, write_priv, read_priv = perm.split('\t')
if not self.bulk_permissions:
if vhost == self.permissions[0]['vhost']:
perms_list.append(dict(vhost=vhost, configure_priv=configure_priv,
write_priv=write_priv, read_priv=read_priv))
break
else:
perms_list.append(dict(vhost=vhost, configure_priv=configure_priv,
write_priv=write_priv, read_priv=read_priv))
return perms_list
def add(self):
if self.password is not None:
self._exec(['add_user', self.username, self.password])
else:
self._exec(['add_user', self.username, ''])
self._exec(['clear_password', self.username])
def delete(self):
self._exec(['delete_user', self.username])
def set_tags(self):
self._exec(['set_user_tags', self.username] + self.tags)
def set_permissions(self):
for permission in self._permissions:
if permission not in self.permissions:
cmd = ['clear_permissions', '-p']
cmd.append(permission['vhost'])
cmd.append(self.username)
self._exec(cmd)
for permission in self.permissions:
if permission not in self._permissions:
cmd = ['set_permissions', '-p']
cmd.append(permission['vhost'])
cmd.append(self.username)
cmd.append(permission['configure_priv'])
cmd.append(permission['write_priv'])
cmd.append(permission['read_priv'])
self._exec(cmd)
def has_tags_modifications(self):
return set(self.tags) != set(self._tags)
def has_permissions_modifications(self):
return self._permissions != self.permissions
def main():
arg_spec = dict(
user=dict(required=True, aliases=['username', 'name']),
password=dict(default=None),
tags=dict(default=None),
permissions=dict(default=list(), type='list'),
vhost=dict(default='/'),
configure_priv=dict(default='^$'),
write_priv=dict(default='^$'),
read_priv=dict(default='^$'),
force=dict(default='no', type='bool'),
state=dict(default='present', choices=['present', 'absent']),
node=dict(default=None)
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
username = module.params['user']
password = module.params['password']
tags = module.params['tags']
permissions = module.params['permissions']
vhost = module.params['vhost']
configure_priv = module.params['configure_priv']
write_priv = module.params['write_priv']
read_priv = module.params['read_priv']
force = module.params['force']
state = module.params['state']
node = module.params['node']
bulk_permissions = True
if permissions == []:
perm = {
'vhost': vhost,
'configure_priv': configure_priv,
'write_priv': write_priv,
'read_priv': read_priv
}
permissions.append(perm)
bulk_permissions = False
rabbitmq_user = RabbitMqUser(module, username, password, tags, permissions,
node, bulk_permissions=bulk_permissions)
changed = False
if rabbitmq_user.get():
if state == 'absent':
rabbitmq_user.delete()
changed = True
else:
if force:
rabbitmq_user.delete()
rabbitmq_user.add()
rabbitmq_user.get()
changed = True
if rabbitmq_user.has_tags_modifications():
rabbitmq_user.set_tags()
changed = True
if rabbitmq_user.has_permissions_modifications():
rabbitmq_user.set_permissions()
changed = True
elif state == 'present':
rabbitmq_user.add()
rabbitmq_user.set_tags()
rabbitmq_user.set_permissions()
changed = True
module.exit_json(changed=changed, user=username, state=state)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
Nowheresly/odoo | addons/hr_timesheet_sheet/hr_timesheet_sheet.py | 36 | 36165 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from pytz import timezone
import pytz
from openerp.osv import fields, osv
from openerp.tools import (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
drop_view_if_exists,
)
from openerp.tools.translate import _
class hr_timesheet_sheet(osv.osv):
_name = "hr_timesheet_sheet.sheet"
_inherit = "mail.thread"
_table = 'hr_timesheet_sheet_sheet'
_order = "id desc"
_description="Timesheet"
def _total(self, cr, uid, ids, name, args, context=None):
""" Compute the attendances, analytic lines timesheets and differences between them
for all the days of a timesheet and the current day
"""
res = dict.fromkeys(ids, {
'total_attendance': 0.0,
'total_timesheet': 0.0,
'total_difference': 0.0,
})
cr.execute("""
SELECT sheet_id as id,
sum(total_attendance) as total_attendance,
sum(total_timesheet) as total_timesheet,
sum(total_difference) as total_difference
FROM hr_timesheet_sheet_sheet_day
WHERE sheet_id IN %s
GROUP BY sheet_id
""", (tuple(ids),))
res.update(dict((x.pop('id'), x) for x in cr.dictfetchall()))
return res
def check_employee_attendance_state(self, cr, uid, sheet_id, context=None):
ids_signin = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_in')])
ids_signout = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_out')])
if len(ids_signin) != len(ids_signout):
raise osv.except_osv(_('Warning!'), _('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.'))
return True
def copy(self, cr, uid, ids, *args, **argv):
raise osv.except_osv(_('Error!'), _('You cannot duplicate a timesheet.'))
def create(self, cr, uid, vals, context=None):
if 'employee_id' in vals:
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product, like \'Consultant\'.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'employee_id' in vals:
new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False
if not new_user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context):
raise osv.except_osv(_('Error!'), _('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
# In addition to the date order, deleting attendances are done before inserting attendances
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context)
if vals.get('attendances_ids'):
for timesheet in self.browse(cr, uid, ids):
if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]):
raise osv.except_osv(_('Warning !'), _('Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)'))
return res
def sort_attendances(self, cr, uid, attendance_tuples, context=None):
date_attendances = []
for att_tuple in attendance_tuples:
if att_tuple[0] in [0,1,4]:
if att_tuple[0] in [0,1]:
if att_tuple[2] and att_tuple[2].has_key('name'):
name = att_tuple[2]['name']
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
date_attendances.append((1, name, att_tuple))
elif att_tuple[0] in [2,3]:
date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple))
else:
date_attendances.append((0, False, att_tuple))
date_attendances.sort()
return [att[2] for att in date_attendances]
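# Editor's illustration (hedged): sort_attendances operates on one2many command
# tuples. Deletion commands such as (2, id) sort first, then create/update
# commands (0/1/4) follow, ordered by the attendance datetime in 'name', e.g.
# [(0, 0, {'name': '2014-01-02 09:00:00'}), (2, 42)]
# becomes
# [(2, 42), (0, 0, {'name': '2014-01-02 09:00:00'})]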
def button_confirm(self, cr, uid, ids, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context)
self.check_employee_attendance_state(cr, uid, sheet.id, context=context)
di = sheet.user_id.company_id.timesheet_max_difference
if (abs(sheet.total_difference) < di) or not di:
sheet.signal_workflow('confirm')
else:
raise osv.except_osv(_('Warning!'), _('Please verify that the total difference of the sheet is lower than %.2f.') %(di,))
return True
def attendance_action_change(self, cr, uid, ids, context=None):
hr_employee = self.pool.get('hr.employee')
employee_ids = []
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id.id not in employee_ids: employee_ids.append(sheet.employee_id.id)
return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context)
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
Timesheet = self.pool['hr.analytic.timesheet']
Attendance = self.pool['hr.attendance']
return {
sheet_id: {
'timesheet_activity_count': Timesheet.search_count(cr,uid, [('sheet_id','=', sheet_id)], context=context),
'attendance_count': Attendance.search_count(cr,uid, [('sheet_id', '=', sheet_id)], context=context)
}
for sheet_id in ids
}
_columns = {
'name': fields.char('Note', select=1,
states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),#fields.many2one('res.users', 'User', required=True, select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'timesheet_ids' : fields.one2many('hr.analytic.timesheet', 'sheet_id',
'Timesheet lines',
readonly=True, states={
'draft': [('readonly', False)],
'new': [('readonly', False)]}
),
'attendances_ids' : fields.one2many('hr.attendance', 'sheet_id', 'Attendances'),
'state' : fields.selection([
('new', 'New'),
('draft','Open'),
('confirm','Waiting Approval'),
('done','Approved')], 'Status', select=True, required=True, readonly=True,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \
\n* The \'Confirmed\' status is used when the timesheet is confirmed by the user. \
\n* The \'Done\' status is used when the user\'s timesheet is accepted by his/her senior.'),
'state_attendance' : fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True),
'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"),
'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"),
'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"),
'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True),
'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True),
'company_id': fields.many2one('res.company', 'Company'),
'department_id':fields.many2one('hr.department','Department'),
'timesheet_activity_count': fields.function(_count_all, type='integer', string='Timesheet Activities', multi=True),
'attendance_count': fields.function(_count_all, type='integer', string="Attendances", multi=True),
}
def _default_date_from(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return time.strftime('%Y-%m-01')
elif r=='week':
return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-01-01')
return fields.date.context_today(self, cr, uid, context)
def _default_date_to(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return (datetime.today() + relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')
elif r=='week':
return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-12-31')
return fields.date.context_today(self, cr, uid, context)
def _default_employee(self, cr, uid, context=None):
emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
return emp_ids and emp_ids[0] or False
_defaults = {
'date_from' : _default_date_from,
'date_to' : _default_date_to,
'state': 'new',
'employee_id': _default_employee,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c)
}
def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
new_user_id = forced_user_id or sheet.employee_id.user_id and sheet.employee_id.user_id.id
if new_user_id:
cr.execute('SELECT id \
FROM hr_timesheet_sheet_sheet \
WHERE (date_from <= %s and %s <= date_to) \
AND user_id=%s \
AND id <> %s',(sheet.date_to, sheet.date_from, new_user_id, sheet.id))
if cr.fetchall():
return False
return True
_constraints = [
(_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from','date_to']),
]
def action_set_to_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft'})
self.create_workflow(cr, uid, ids)
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (long, int)):
ids = [ids]
return [(r['id'], _('Week ')+datetime.strptime(r['date_from'], '%Y-%m-%d').strftime('%U')) \
for r in self.read(cr, uid, ids, ['date_from'],
context=context, load='_classic_write')]
def unlink(self, cr, uid, ids, context=None):
sheets = self.read(cr, uid, ids, ['state','total_attendance'], context=context)
for sheet in sheets:
if sheet['state'] in ('confirm', 'done'):
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which is already confirmed.'))
elif sheet['total_attendance'] <> 0.00:
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which have attendance entries.'))
toremove = []
analytic_timesheet = self.pool.get('hr.analytic.timesheet')
for sheet in self.browse(cr, uid, ids, context=context):
for timesheet in sheet.timesheet_ids:
toremove.append(timesheet.id)
analytic_timesheet.unlink(cr, uid, toremove, context=context)
return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context)
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
department_id = False
user_id = False
if employee_id:
empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
department_id = empl_id.department_id.id
user_id = empl_id.user_id.id
return {'value': {'department_id': department_id, 'user_id': user_id,}}
# ------------------------------------------------
# OpenChatter methods and notifications
# ------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
if not empids:
return False
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
return dom
class account_analytic_line(osv.osv):
_inherit = "account.analytic.line"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
#get the default date (should be: today)
res = super(account_analytic_line, self)._get_default_date(cr, uid, context=context)
#if we got the dates from and to from the timesheet and if the default date is in between, we use the default
#but if the default isn't included in those dates, we use the date start of the timesheet as default
if context.get('timesheet_date_from') and context.get('timesheet_date_to'):
if context['timesheet_date_from'] <= res <= context['timesheet_date_to']:
return res
return context.get('timesheet_date_from')
#if we don't get the dates from the timesheet, we return the default value from super()
return res
class account_analytic_account(osv.osv):
_inherit = "account.analytic.account"
def name_create(self, cr, uid, name, context=None):
if context is None:
context = {}
group_template_required = self.pool['res.users'].has_group(cr, uid, 'account_analytic_analysis.group_template_required')
if not context.get('default_use_timesheets') or group_template_required:
return super(account_analytic_account, self).name_create(cr, uid, name, context=context)
rec_id = self.create(cr, uid, {self._rec_name: name}, context)
return self.name_get(cr, uid, [rec_id], context)[0]
class hr_timesheet_line(osv.osv):
_inherit = "hr.analytic.timesheet"
def _sheet(self, cursor, user, ids, name, args, context=None):
sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
res = {}.fromkeys(ids, False)
for ts_line in self.browse(cursor, user, ids, context=context):
sheet_ids = sheet_obj.search(cursor, user,
[('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date),
('employee_id.user_id', '=', ts_line.user_id.id),
('state', 'in', ['draft', 'new'])],
context=context)
if sheet_ids:
# [0] because only one sheet possible for an employee between 2 dates
res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
return res
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
ts_line_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT l.id
FROM hr_analytic_timesheet l
INNER JOIN account_analytic_line al
ON (l.line_id = al.id)
WHERE %(date_to)s >= al.date
AND %(date_from)s <= al.date
AND %(user_id)s = al.user_id
GROUP BY l.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
ts_line_ids.extend([row[0] for row in cr.fetchall()])
return ts_line_ids
def _get_account_analytic_line(self, cr, uid, ids, context=None):
ts_line_ids = self.pool.get('hr.analytic.timesheet').search(cr, uid, [('line_id', 'in', ids)])
return ts_line_ids
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet', select="1",
type='many2one', relation='hr_timesheet_sheet.sheet', ondelete="cascade",
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'account.analytic.line': (_get_account_analytic_line, ['user_id', 'date'], 10),
'hr.analytic.timesheet': (lambda self,cr,uid,ids,context=None: ids, None, 10),
},
),
}
def write(self, cr, uid, ids, values, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_timesheet_line, self).write(cr, uid, ids, values, context=context)
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_timesheet_line,self).unlink(cr, uid, ids,*args, **kwargs)
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.'))
return True
def multi_on_change_account_id(self, cr, uid, ids, account_ids, context=None):
return dict([(el, self.on_change_account_id(cr, uid, ids, el, context.get('user_id', uid))) for el in account_ids])
class hr_attendance(osv.osv):
_inherit = "hr.attendance"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
if 'name' in context:
return context['name'] + time.strftime(' %H:%M:%S')
return time.strftime('%Y-%m-%d %H:%M:%S')
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
attendance_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT a.id
FROM hr_attendance a
INNER JOIN hr_employee e
INNER JOIN resource_resource r
ON (e.resource_id = r.id)
ON (a.employee_id = e.id)
LEFT JOIN res_users u
ON r.user_id = u.id
LEFT JOIN res_partner p
ON u.partner_id = p.id
WHERE %(date_to)s >= date_trunc('day', a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))
AND %(date_from)s <= date_trunc('day', a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))
AND %(user_id)s = r.user_id
GROUP BY a.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
attendance_ids.extend([row[0] for row in cr.fetchall()])
return attendance_ids
def _get_attendance_employee_tz(self, cr, uid, employee_id, date, context=None):
""" Simulate timesheet in employee timezone
Return the attendance date in string format in the employee
tz converted from utc timezone as we consider date of employee
timesheet is in employee timezone
"""
employee_obj = self.pool['hr.employee']
tz = False
if employee_id:
employee = employee_obj.browse(cr, uid, employee_id, context=context)
tz = employee.user_id.partner_id.tz
if not date:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
att_tz = timezone(tz or 'utc')
attendance_dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT)
att_tz_dt = pytz.utc.localize(attendance_dt)
att_tz_dt = att_tz_dt.astimezone(att_tz)
# We take only the date omiting the hours as we compare with timesheet
# date_from which is a date format thus using hours would lead to
# be out of scope of timesheet
att_tz_date_str = datetime.strftime(att_tz_dt, DEFAULT_SERVER_DATE_FORMAT)
return att_tz_date_str
def _get_current_sheet(self, cr, uid, employee_id, date=False, context=None):
sheet_obj = self.pool['hr_timesheet_sheet.sheet']
if not date:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
att_tz_date_str = self._get_attendance_employee_tz(
cr, uid, employee_id,
date=date, context=context)
sheet_ids = sheet_obj.search(cr, uid,
[('date_from', '<=', att_tz_date_str),
('date_to', '>=', att_tz_date_str),
('employee_id', '=', employee_id)],
limit=1, context=context)
return sheet_ids and sheet_ids[0] or False
def _sheet(self, cursor, user, ids, name, args, context=None):
res = {}.fromkeys(ids, False)
for attendance in self.browse(cursor, user, ids, context=context):
res[attendance.id] = self._get_current_sheet(
cursor, user, attendance.employee_id.id, attendance.name,
context=context)
return res
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet',
type='many2one', relation='hr_timesheet_sheet.sheet',
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10),
},
)
}
_defaults = {
'name': _get_default_date,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
sheet_id = context.get('sheet_id') or self._get_current_sheet(cr, uid, vals.get('employee_id'), vals.get('name'), context=context)
if sheet_id:
att_tz_date_str = self._get_attendance_employee_tz(
cr, uid, vals.get('employee_id'),
date=vals.get('name'), context=context)
ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, sheet_id, context=context)
if ts.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You can not enter an attendance in a submitted timesheet. Ask your manager to reset it before adding attendance.'))
elif ts.date_from > att_tz_date_str or ts.date_to < att_tz_date_str:
raise osv.except_osv(_('User Error!'), _('You can not enter an attendance date outside the current timesheet dates.'))
return super(hr_attendance,self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context)
if 'sheet_id' in context:
for attendance in self.browse(cr, uid, ids, context=context):
if context['sheet_id'] != attendance.sheet_id.id:
raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \
'date outside the current timesheet dates.'))
return res
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet'))
return True
class hr_timesheet_sheet_sheet_day(osv.osv):
_name = "hr_timesheet_sheet.sheet.day"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.date('Date', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"),
'total_timesheet': fields.float('Total Timesheet', readonly=True),
'total_attendance': fields.float('Attendance', readonly=True),
'total_difference': fields.float('Difference', readonly=True),
}
_depends = {
'account.analytic.line': ['date', 'unit_amount'],
'hr.analytic.timesheet': ['line_id', 'sheet_id'],
'hr.attendance': ['action', 'name', 'sheet_id'],
}
def init(self, cr):
drop_view_if_exists(cr, 'hr_timesheet_sheet_sheet_day')
cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as
SELECT
id,
name,
sheet_id,
total_timesheet,
total_attendance,
cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference
FROM
((
SELECT
MAX(id) as id,
name,
sheet_id,
timezone,
SUM(total_timesheet) as total_timesheet,
CASE WHEN SUM(orphan_attendances) != 0
THEN (SUM(total_attendance) +
CASE WHEN current_date <> name
THEN 1440
ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC' AT TIME ZONE coalesce(timezone, 'UTC')) * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC' AT TIME ZONE coalesce(timezone, 'UTC'))
END
)
ELSE SUM(total_attendance)
END /60 as total_attendance
FROM
((
select
min(hrt.id) as id,
p.tz as timezone,
l.date::date as name,
s.id as sheet_id,
sum(l.unit_amount) as total_timesheet,
0 as orphan_attendances,
0.0 as total_attendance
from
hr_analytic_timesheet hrt
JOIN account_analytic_line l ON l.id = hrt.line_id
LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = hrt.sheet_id
JOIN hr_employee e ON s.employee_id = e.id
JOIN resource_resource r ON e.resource_id = r.id
LEFT JOIN res_users u ON r.user_id = u.id
LEFT JOIN res_partner p ON u.partner_id = p.id
group by l.date::date, s.id, timezone
) union (
select
-min(a.id) as id,
p.tz as timezone,
(a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))::date as name,
s.id as sheet_id,
0.0 as total_timesheet,
SUM(CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END) as orphan_attendances,
SUM(((EXTRACT(hour FROM (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))) * 60) + EXTRACT(minute FROM (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC')))) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance
from
hr_attendance a
LEFT JOIN hr_timesheet_sheet_sheet s
ON s.id = a.sheet_id
JOIN hr_employee e
ON a.employee_id = e.id
JOIN resource_resource r
ON e.resource_id = r.id
LEFT JOIN res_users u
ON r.user_id = u.id
LEFT JOIN res_partner p
ON u.partner_id = p.id
WHERE action in ('sign_in', 'sign_out')
group by (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))::date, s.id, timezone
)) AS foo
GROUP BY name, sheet_id, timezone
)) AS bar""")
class hr_timesheet_sheet_sheet_account(osv.osv):
_name = "hr_timesheet_sheet.sheet.account"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True),
'total': fields.float('Total Time', digits=(16,2), readonly=True),
'invoice_rate': fields.many2one('hr_timesheet_invoice.factor', 'Invoice rate', readonly=True),
}
_depends = {
'account.analytic.line': ['account_id', 'date', 'to_invoice', 'unit_amount', 'user_id'],
'hr.analytic.timesheet': ['line_id'],
'hr_timesheet_sheet.sheet': ['date_from', 'date_to', 'user_id'],
}
def init(self, cr):
drop_view_if_exists(cr, 'hr_timesheet_sheet_sheet_account')
cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as (
select
min(hrt.id) as id,
l.account_id as name,
s.id as sheet_id,
sum(l.unit_amount) as total,
l.to_invoice as invoice_rate
from
hr_analytic_timesheet hrt
left join (account_analytic_line l
LEFT JOIN hr_timesheet_sheet_sheet s
ON (s.date_to >= l.date
AND s.date_from <= l.date
AND s.user_id = l.user_id))
on (l.id = hrt.line_id)
group by l.account_id, s.id, l.to_invoice
)""")
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'timesheet_range': fields.selection(
[('day','Day'),('week','Week'),('month','Month')], 'Timesheet range',
help="Periodicity on which you validate your timesheets."),
'timesheet_max_difference': fields.float('Timesheet allowed difference(Hours)',
help="Allowed difference in hours between the sign in/out and the timesheet " \
"computation for one sheet. Set this to 0 if you do not want any control."),
}
_defaults = {
'timesheet_range': lambda *args: 'week',
'timesheet_max_difference': lambda *args: 0.0
}
class hr_employee(osv.osv):
'''
Employee
'''
_inherit = 'hr.employee'
_description = 'Employee'
def _timesheet_count(self, cr, uid, ids, field_name, arg, context=None):
Sheet = self.pool['hr_timesheet_sheet.sheet']
return {
employee_id: Sheet.search_count(cr,uid, [('employee_id', '=', employee_id)], context=context)
for employee_id in ids
}
_columns = {
'timesheet_count': fields.function(_timesheet_count, type='integer', string='Timesheets'),
}
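# Hedged standalone sketch (added example, not part of the original module) of the
# UTC -> employee-timezone conversion performed by
# hr_attendance._get_attendance_employee_tz() above; the format strings are assumed
# to match the server defaults imported at the top of this file, and the sample
# timezone is only illustrative.
def _example_attendance_date_in_employee_tz(utc_datetime_str='2014-01-31 23:30:00',
                                            tz_name='Europe/Brussels'):
    assumed_datetime_fmt = '%Y-%m-%d %H:%M:%S'  # assumed DEFAULT_SERVER_DATETIME_FORMAT
    assumed_date_fmt = '%Y-%m-%d'               # assumed DEFAULT_SERVER_DATE_FORMAT
    att_dt = pytz.utc.localize(datetime.strptime(utc_datetime_str, assumed_datetime_fmt))
    # 23:30 UTC on Jan 31 is already Feb 1 in Brussels, so the attendance day shifts
    return att_dt.astimezone(timezone(tz_name)).strftime(assumed_date_fmt)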
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zero-ui/miniblink49 | third_party/WebKit/Tools/Scripts/webkitpy/common/net/networktransaction.py | 190 | 2926 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import time
import urllib2
_log = logging.getLogger(__name__)
class NetworkTimeout(Exception):
def __str__(self):
return 'NetworkTimeout'
class NetworkTransaction(object):
def __init__(self, initial_backoff_seconds=10, grown_factor=1.5, timeout_seconds=(10 * 60), convert_404_to_None=False):
self._initial_backoff_seconds = initial_backoff_seconds
self._grown_factor = grown_factor
self._timeout_seconds = timeout_seconds
self._convert_404_to_None = convert_404_to_None
def run(self, request):
self._total_sleep = 0
self._backoff_seconds = self._initial_backoff_seconds
while True:
try:
return request()
except urllib2.HTTPError, e:
if self._convert_404_to_None and e.code == 404:
return None
self._check_for_timeout()
_log.warn("Received HTTP status %s loading \"%s\". Retrying in %s seconds..." % (e.code, e.filename, self._backoff_seconds))
self._sleep()
def _check_for_timeout(self):
if self._total_sleep + self._backoff_seconds > self._timeout_seconds:
raise NetworkTimeout()
def _sleep(self):
time.sleep(self._backoff_seconds)
self._total_sleep += self._backoff_seconds
self._backoff_seconds *= self._grown_factor
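# Hedged usage sketch (added example, not part of the original module): run()
# takes a zero-argument callable and retries it with exponential backoff on
# urllib2.HTTPError until the cumulative sleep would exceed timeout_seconds.
# The URL below is a placeholder assumption, not something this repository defines.
def _example_fetch_with_retries(url="http://example.com/status"):
    def request():
        return urllib2.urlopen(url).read()
    # convert_404_to_None turns an HTTP 404 into a None result instead of a retry
    return NetworkTransaction(convert_404_to_None=True).run(request)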
| gpl-3.0 |
talishte/ctigre | env/lib/python2.7/site-packages/fabfile/tag.py | 24 | 4227 | from __future__ import with_statement
from contextlib import nested
from fabric.api import abort, hide, local, settings, task
# Need to import this as fabric.version for reload() purposes
import fabric.version
# But nothing is stopping us from making a convenient binding!
_version = fabric.version.get_version
from utils import msg
def _seek_version(cmd, txt):
with nested(hide('running'), msg(txt)):
cmd = cmd % _version('short')
return local(cmd, capture=True)
def current_version_is_tagged():
return _seek_version(
'git tag | egrep "^%s$"',
"Searching for existing tag"
)
def current_version_is_changelogged(filename):
return _seek_version(
'egrep "^\* :release:\`%%s " %s' % filename,
"Looking for changelog entry"
)
def update_code(filename, force):
"""
Update version data structure in-code and commit that change to git.
Normally, if the version file has not been modified, we abort assuming the
user quit without saving. Specify ``force=yes`` to override this.
"""
raw_input("Version update in %r required! Press Enter to load $EDITOR." % filename)
with hide('running'):
local("$EDITOR %s" % filename)
# Try to detect whether user bailed out of the edit
with hide('running'):
has_diff = local("git diff -- %s" % filename, capture=True)
if not has_diff and not force:
abort("You seem to have aborted the file edit, so I'm aborting too.")
return filename
def commits_since_last_tag():
"""
Has any work been done since the last tag?
"""
with hide('running'):
return local("git log %s.." % _version('short'), capture=True)
@task(default=True)
def tag(force='no', push='no'):
"""
Tag a new release.
Normally, if a Git tag exists matching the current version, and no Git
commits appear after that tag, we abort assuming the user is making a
mistake or forgot to commit their work.
To override this -- i.e. to re-tag and re-upload -- specify ``force=yes``.
We assume you know what you're doing if you use this.
By default we do not push the tag remotely; specify ``push=yes`` to force a
``git push origin <tag>``.
"""
force = force.lower() in ['y', 'yes']
with settings(warn_only=True):
changed = []
# Does the current in-code version exist as a Git tag already?
# If so, this means we haven't updated the in-code version specifier
# yet, and need to do so.
if current_version_is_tagged():
# That is, if any work has been done since. Sanity check!
if not commits_since_last_tag() and not force:
abort("No work done since last tag!")
# Open editor, update version
version_file = "fabric/version.py"
changed.append(update_code(version_file, force))
# If the tag doesn't exist, the user has already updated version info
# and we can just move on.
else:
print("Version has already been updated, no need to edit...")
# Similar process but for the changelog.
changelog = "docs/changelog.rst"
if not current_version_is_changelogged(changelog):
changed.append(update_code(changelog, force))
else:
print("Changelog already updated, no need to edit...")
# Commit any changes
if changed:
with msg("Committing updated version and/or changelog"):
reload(fabric.version)
local("git add %s" % " ".join(changed))
local("git commit -m \"Cut %s\"" % _version('verbose'))
local("git push")
# At this point, we've incremented the in-code version and just need to
# tag it in Git.
f = 'f' if force else ''
with msg("Tagging"):
local("git tag -%sam \"Fabric %s\" %s" % (
f,
_version('normal'),
_version('short')
))
# And push to the central server, if we were told to
if push.lower() in ['y', 'yes']:
with msg("Pushing"):
local("git push origin %s" % _version('short'))
| bsd-2-clause |
7fever/script.pseudotv.live | service.py | 1 | 4196 | # Copyright (C) 2015 Kevin S. Graer
#
#
# This file is part of PseudoTV Live.
#
# PseudoTV Live is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV Live is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV Live. If not, see <http://www.gnu.org/licenses/>.
import os, shutil, datetime, time, random
import xbmc, xbmcgui, xbmcaddon, xbmcvfs
from time import sleep
from resources.lib.utils import *
# Plugin Info
ADDON_ID = 'script.pseudotv.live'
REAL_SETTINGS = xbmcaddon.Addon(id=ADDON_ID)
ADDON_ID = REAL_SETTINGS.getAddonInfo('id')
ADDON_NAME = REAL_SETTINGS.getAddonInfo('name')
ADDON_PATH = REAL_SETTINGS.getAddonInfo('path')
ADDON_VERSION = REAL_SETTINGS.getAddonInfo('version')
THUMB = (xbmc.translatePath(os.path.join(ADDON_PATH, 'resources', 'images')) + '/' + 'icon.png')
def HubSwap(): # Swap Org/Hub versions if 'Hub Installer' found.
icon = ADDON_PATH + '/icon'
HUB = xbmc.getCondVisibility('System.HasAddon(plugin.program.addoninstaller)') == 1
if HUB == True:
xbmc.log('script.pseudotv.live-Service: HubSwap = Hub Edition')
if REAL_SETTINGS.getSetting('Hub') == 'false':
xbmc.executebuiltin("Notification( %s, %s, %d, %s)" % ("PseudoTV Live","Hub-Edition Activated", 4000, THUMB) )
REAL_SETTINGS.setSetting("Hub","true")
else:
xbmc.log('script.pseudotv.live-Service: HubSwap = Master')
REAL_SETTINGS.setSetting("Hub","false")
return
def donorCHK():
DonorPath = (os.path.join(ADDON_PATH, 'resources', 'lib', 'Donor.pyo'))
DL_DonorPath = (os.path.join(ADDON_PATH, 'resources', 'lib', 'Donor.py'))
if xbmcvfs.exists(DonorPath) or xbmcvfs.exists(DL_DonorPath):
xbmc.log('script.pseudotv.live-Service: donorCHK = Donor')
REAL_SETTINGS.setSetting("AT_Donor", "true")
REAL_SETTINGS.setSetting("COM_Donor", "true")
REAL_SETTINGS.setSetting("TRL_Donor", "true")
REAL_SETTINGS.setSetting("CAT_Donor", "true")
# REAL_SETTINGS.setSetting("autoFindCommunity_Source", "1")
else:
xbmc.log('script.pseudotv.live-Service: donorCHK = FreeUser')
REAL_SETTINGS.setSetting("AT_Donor", "false")
REAL_SETTINGS.setSetting("COM_Donor", "false")
REAL_SETTINGS.setSetting("TRL_Donor", "false")
REAL_SETTINGS.setSetting("CAT_Donor", "false")
# REAL_SETTINGS.setSetting("autoFindCommunity_Source", "0")
return
def service():
xbmc.log('script.pseudotv.live-Service: Init')
try:
while (not xbmc.abortRequested):
if xbmcgui.Window(10000).getProperty("PseudoTVRunning") != "True":
xbmc.log("script.pseudotv.live-Service: Started")
donorCHK()
HubSwap()
if REAL_SETTINGS.getSetting("SyncXMLTV_Enabled") == "true":
SyncXMLTV()
if REAL_SETTINGS.getSetting("Auto_Start") == "true" and xbmcgui.Window(10000).getProperty("PseudoTVautostart") != "True":
xbmcgui.Window(10000).setProperty("PseudoTVautostart", "True")
autostart()
xbmc.log('script.pseudotv.live-Service: Idle')
xbmc.sleep(100000)
except:
pass
def autostart():
xbmc.log('script.pseudotv.live-Service: autostart')
xbmc.executebuiltin("Notification( %s, %s, %d, %s)" % ("AutoStart PseudoTV Live","Service Starting...", 4000, THUMB) )
AUTOSTART_TIMER = [0,5,10,15,20]#in seconds
IDLE_TIME = AUTOSTART_TIMER[int(REAL_SETTINGS.getSetting('timer_amount'))]
sleep(IDLE_TIME)
xbmc.executebuiltin('RunScript("' + ADDON_PATH + '/default.py' + '")')
return
service() | gpl-3.0 |
lowitty/server | libsLinux/pyasn1/type/tagmap.py | 172 | 2392 | from pyasn1 import error
class TagMap:
def __init__(self, posMap={}, negMap={}, defType=None):
self.__posMap = posMap.copy()
self.__negMap = negMap.copy()
self.__defType = defType
def __contains__(self, tagSet):
return tagSet in self.__posMap or \
self.__defType is not None and tagSet not in self.__negMap
def __getitem__(self, tagSet):
if tagSet in self.__posMap:
return self.__posMap[tagSet]
elif tagSet in self.__negMap:
raise error.PyAsn1Error('Key in negative map')
elif self.__defType is not None:
return self.__defType
else:
raise KeyError()
def __repr__(self):
s = self.__class__.__name__ + '('
if self.__posMap:
s = s + 'posMap=%r, ' % (self.__posMap,)
if self.__negMap:
s = s + 'negMap=%r, ' % (self.__negMap,)
if self.__defType is not None:
s = s + 'defType=%r' % (self.__defType,)
return s + ')'
def __str__(self):
s = self.__class__.__name__ + ':\n'
if self.__posMap:
s = s + 'posMap:\n%s, ' % ',\n '.join([ x.prettyPrintType() for x in self.__posMap.values()])
if self.__negMap:
s = s + 'negMap:\n%s, ' % ',\n '.join([ x.prettyPrintType() for x in self.__negMap.values()])
if self.__defType is not None:
s = s + 'defType:\n%s, ' % self.__defType.prettyPrintType()
return s
def clone(self, parentType, tagMap, uniq=False):
if self.__defType is not None and tagMap.getDef() is not None:
raise error.PyAsn1Error('Duplicate default value at %s' % (self,))
if tagMap.getDef() is not None:
defType = tagMap.getDef()
else:
defType = self.__defType
posMap = self.__posMap.copy()
for k in tagMap.getPosMap():
if uniq and k in posMap:
raise error.PyAsn1Error('Duplicate positive key %s' % (k,))
posMap[k] = parentType
negMap = self.__negMap.copy()
negMap.update(tagMap.getNegMap())
return self.__class__(
posMap, negMap, defType,
)
def getPosMap(self): return self.__posMap.copy()
def getNegMap(self): return self.__negMap.copy()
def getDef(self): return self.__defType
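# Hedged illustration (added example, not part of pyasn1): TagMap only relies on
# dict-style containment of its keys, so plain hashable stand-ins are enough to
# show the lookup rules implemented above.
def _tagmap_demo():
    component = object()   # stand-in for an ASN.1 component type
    tm = TagMap(posMap={'tagA': component}, negMap={'tagB': component}, defType=component)
    assert 'tagA' in tm and tm['tagA'] is component    # direct hit in the positive map
    assert 'tagC' in tm and tm['tagC'] is component    # unknown key falls back to defType
    try:
        tm['tagB']                                     # key in the negative map raises
    except error.PyAsn1Error:
        pass
    return tm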
| mit |
lakshmi-kannan/st2contrib | packs/twitter/sensors/twitter_search_sensor.py | 6 | 3371 | from TwitterSearch import TwitterSearch
from TwitterSearch import TwitterSearchOrder
from st2reactor.sensor.base import PollingSensor
__all__ = [
'TwitterSearchSensor'
]
BASE_URL = 'https://twitter.com'
class TwitterSearchSensor(PollingSensor):
def __init__(self, sensor_service, config=None, poll_interval=None):
super(TwitterSearchSensor, self).__init__(sensor_service=sensor_service,
config=config,
poll_interval=poll_interval)
self._trigger_ref = 'twitter.matched_tweet'
self._logger = self._sensor_service.get_logger(__name__)
def setup(self):
self._client = TwitterSearch(
consumer_key=self._config['consumer_key'],
consumer_secret=self._config['consumer_secret'],
access_token=self._config['access_token'],
access_token_secret=self._config['access_token_secret']
)
self._last_id = None
def poll(self):
tso = TwitterSearchOrder()
tso.set_keywords([self._config['query']])
language = self._config.get('language', None)
if language:
tso.set_language(language)
tso.set_result_type('recent')
tso.set_count(self._config.get('count', 30))
tso.set_include_entities(False)
last_id = self._get_last_id()
if last_id:
tso.set_since_id(int(last_id))
try:
tweets = self._client.search_tweets(tso)
tweets = tweets['content']['statuses']
except Exception as e:
self._logger.exception('Polling Twitter failed: %s' % (str(e)))
return
tweets = list(reversed(tweets))
if tweets:
self._set_last_id(last_id=tweets[-1]['id'])
for tweet in tweets:
self._dispatch_trigger_for_tweet(tweet=tweet)
def cleanup(self):
pass
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
def _get_last_id(self):
if not self._last_id and hasattr(self._sensor_service, 'get_value'):
self._last_id = self._sensor_service.get_value(name='last_id')
return self._last_id
def _set_last_id(self, last_id):
self._last_id = last_id
if hasattr(self._sensor_service, 'set_value'):
self._sensor_service.set_value(name='last_id', value=last_id)
def _dispatch_trigger_for_tweet(self, tweet):
trigger = self._trigger_ref
url = '%s/%s/status/%s' % (BASE_URL, tweet['user']['screen_name'], tweet['id'])
payload = {
'id': tweet['id'],
'created_at': tweet['created_at'],
'lang': tweet['lang'],
'place': tweet['place'],
'retweet_count': tweet['retweet_count'],
'favorite_count': tweet['favorite_count'],
'user': {
'screen_name': tweet['user']['screen_name'],
'name': tweet['user']['name'],
'location': tweet['user']['location'],
'description': tweet['user']['description'],
},
'text': tweet['text'],
'url': url
}
self._sensor_service.dispatch(trigger=trigger, payload=payload)
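# Hedged sketch (added example) of the pack configuration this sensor reads via
# self._config; the key names come from the code above, the values are placeholders.
EXAMPLE_CONFIG = {
    'consumer_key': 'REPLACE_ME',
    'consumer_secret': 'REPLACE_ME',
    'access_token': 'REPLACE_ME',
    'access_token_secret': 'REPLACE_ME',
    'query': 'StackStorm',       # keywords handed to TwitterSearchOrder.set_keywords()
    'language': None,            # optional ISO language code filter
    'count': 30,                 # optional batch size, defaults to 30 above
}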
| apache-2.0 |
bartoldeman/easybuild-framework | easybuild/toolchains/compiler/clang.py | 1 | 5280 | ##
# Copyright 2013-2018 Ghent University
#
# This file is triple-licensed under GPLv2 (see below), MIT, and
# BSD three-clause licenses.
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Support for Clang as toolchain compiler.
:author: Dmitri Gribenko (National Technical University of Ukraine "KPI")
"""
import easybuild.tools.systemtools as systemtools
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.toolchain.compiler import Compiler
TC_CONSTANT_CLANG = "Clang"
class Clang(Compiler):
"""Clang compiler class"""
COMPILER_MODULE_NAME = ['Clang']
COMPILER_FAMILY = TC_CONSTANT_CLANG
# Don't set COMPILER_FAMILY in this class because Clang does not have
# Fortran support, and thus it is not a complete compiler as far as
# EasyBuild is concerned.
COMPILER_UNIQUE_OPTS = {
'loop-vectorize': (False, "Loop vectorization"),
'basic-block-vectorize': (False, "Basic block vectorization"),
}
COMPILER_UNIQUE_OPTION_MAP = {
'unroll': 'funroll-loops',
'loop-vectorize': ['fvectorize'],
'basic-block-vectorize': ['fslp-vectorize'],
'optarch':'march=native',
# Clang's options do not map well onto these precision modes. The flags enable and disable certain classes of
# optimizations.
#
# -fassociative-math: allow re-association of operands in series of floating-point operations, violates the
# ISO C and C++ language standard by possibly changing computation result.
# -freciprocal-math: allow optimizations to use the reciprocal of an argument rather than perform division.
# -fsigned-zeros: do not allow optimizations to treat the sign of a zero argument or result as insignificant.
# -fhonor-infinities: disallow optimizations to assume that arguments and results are not +/- Infs.
# -fhonor-nans: disallow optimizations to assume that arguments and results are not +/- NaNs.
# -ffinite-math-only: allow optimizations for floating-point arithmetic that assume that arguments and results
# are not NaNs or +-Infs (equivalent to -fno-honor-nans -fno-honor-infinities)
# -funsafe-math-optimizations: allow unsafe math optimizations (implies -fassociative-math, -fno-signed-zeros,
# -freciprocal-math).
# -ffast-math: an umbrella flag that enables all optimizations listed above, provides preprocessor macro
# __FAST_MATH__.
#
# Using -fno-fast-math is equivalent to disabling all individual optimizations, see
# http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Driver/Tools.cpp?view=markup (lines 2100 and following)
#
# 'strict', 'precise' and 'defaultprec' are all ISO C++ and IEEE complaint, but we explicitly specify details
# flags for strict and precise for robustness against future changes.
'strict': ['fno-fast-math'],
'precise': ['fno-unsafe-math-optimizations'],
'defaultprec': [],
'loose': ['ffast-math', 'fno-unsafe-math-optimizations'],
'veryloose': ['ffast-math'],
'vectorize': {False: 'fno-vectorize', True: 'fvectorize'},
}
# used when 'optarch' toolchain option is enabled (and --optarch is not specified)
COMPILER_OPTIMAL_ARCHITECTURE_OPTION = {
(systemtools.POWER, systemtools.POWER): 'mcpu=native', # no support for march=native on POWER
(systemtools.POWER, systemtools.POWER_LE): 'mcpu=native', # no support for march=native on POWER
(systemtools.X86_64, systemtools.AMD): 'march=native',
(systemtools.X86_64, systemtools.INTEL): 'march=native',
}
# used with --optarch=GENERIC
COMPILER_GENERIC_OPTION = {
(systemtools.X86_64, systemtools.AMD): 'march=x86-64 -mtune=generic',
(systemtools.X86_64, systemtools.INTEL): 'march=x86-64 -mtune=generic',
}
COMPILER_CC = 'clang'
COMPILER_CXX = 'clang++'
COMPILER_C_UNIQUE_FLAGS = []
LIB_MULTITHREAD = ['pthread']
LIB_MATH = ['m']
def _set_compiler_vars(self):
"""Set compiler variables."""
super(Clang, self)._set_compiler_vars()
if self.options.get('32bit', None):
raise EasyBuildError("_set_compiler_vars: 32bit set, but no support yet for 32bit Clang in EasyBuild")
| gpl-2.0 |
syjzwjj/z3 | src/api/python/z3util.py | 15 | 10120 | """
Usage:
import common_z3 as CM_Z3
"""
import common as CM
from z3 import *
def get_z3_version(as_str=False):
major = ctypes.c_uint(0)
minor = ctypes.c_uint(0)
build = ctypes.c_uint(0)
rev = ctypes.c_uint(0)
Z3_get_version(major,minor,build,rev)
rs = map(int,(major.value,minor.value,build.value,rev.value))
if as_str:
return "{}.{}.{}.{}".format(*rs)
else:
return rs
def ehash(v):
"""
Returns a 'stronger' hash value than the default hash() method.
The result from hash() is not enough to distinguish between 2
z3 expressions in some cases.
>>> x1 = Bool('x'); x2 = Bool('x'); x3 = Int('x')
>>> print(x1.hash(),x2.hash(),x3.hash()) #BAD: all same hash values
783810685 783810685 783810685
>>> print(ehash(x1), ehash(x2), ehash(x3))
x_783810685_1 x_783810685_1 x_783810685_2
"""
if __debug__:
assert is_expr(v)
return "{}_{}_{}".format(str(v),v.hash(),v.sort_kind())
"""
In Z3, variables are called *uninterpreted* consts and
values are *interpreted* consts.
"""
def is_expr_var(v):
"""
EXAMPLES:
>>> is_expr_var(Int('7'))
True
>>> is_expr_var(IntVal('7'))
False
>>> is_expr_var(Bool('y'))
True
>>> is_expr_var(Int('x') + 7 == Int('y'))
False
>>> LOnOff, (On,Off) = EnumSort("LOnOff",['On','Off'])
>>> Block,Reset,SafetyInjection=Consts("Block Reset SafetyInjection",LOnOff)
>>> is_expr_var(LOnOff)
False
>>> is_expr_var(On)
False
>>> is_expr_var(Block)
True
>>> is_expr_var(SafetyInjection)
True
"""
return is_const(v) and v.decl().kind()==Z3_OP_UNINTERPRETED
def is_expr_val(v):
"""
EXAMPLES:
>>> is_expr_val(Int('7'))
False
>>> is_expr_val(IntVal('7'))
True
>>> is_expr_val(Bool('y'))
False
>>> is_expr_val(Int('x') + 7 == Int('y'))
False
>>> LOnOff, (On,Off) = EnumSort("LOnOff",['On','Off'])
>>> Block,Reset,SafetyInjection=Consts("Block Reset SafetyInjection",LOnOff)
>>> is_expr_val(LOnOff)
False
>>> is_expr_val(On)
True
>>> is_expr_val(Block)
False
>>> is_expr_val(SafetyInjection)
False
"""
return is_const(v) and v.decl().kind()!=Z3_OP_UNINTERPRETED
def get_vars(f,rs=[]):
"""
>>> x,y = Ints('x y')
>>> a,b = Bools('a b')
>>> get_vars(Implies(And(x+y==0,x*2==10),Or(a,Implies(a,b==False))))
[x, y, a, b]
"""
if __debug__:
assert is_expr(f)
if is_const(f):
if is_expr_val(f):
return rs
else: #variable
return CM.vset(rs + [f],str)
else:
for f_ in f.children():
rs = get_vars(f_,rs)
return CM.vset(rs,str)
def mk_var(name,vsort):
if vsort.kind() == Z3_INT_SORT:
v = Int(name)
elif vsort.kind() == Z3_REAL_SORT:
v = Real(name)
elif vsort.kind() == Z3_BOOL_SORT:
v = Bool(name)
elif vsort.kind() == Z3_DATATYPE_SORT:
v = Const(name,vsort)
else:
assert False, 'Cannot handle this sort (s: %sid: %d)'\
%(vsort,vsort.kind())
return v
def prove(claim,assume=None,verbose=0):
"""
>>> r,m = prove(BoolVal(True),verbose=0); r,model_str(m,as_str=False)
(True, None)
#infinite counter example when proving contradiction
>>> r,m = prove(BoolVal(False)); r,model_str(m,as_str=False)
(False, [])
>>> x,y,z=Bools('x y z')
>>> r,m = prove(And(x,Not(x))); r,model_str(m,as_str=True)
(False, '[]')
>>> r,m = prove(True,assume=And(x,Not(x)),verbose=0)
Traceback (most recent call last):
...
AssertionError: Assumption is alway False!
>>> r,m = prove(Implies(x,x),assume=y,verbose=2); r,model_str(m,as_str=False)
assume:
y
claim:
Implies(x, x)
to_prove:
Implies(y, Implies(x, x))
(True, None)
>>> r,m = prove(And(x,True),assume=y,verbose=0); r,model_str(m,as_str=False)
(False, [(x, False), (y, True)])
>>> r,m = prove(And(x,y),assume=y,verbose=0)
>>> print(r)
False
>>> print(model_str(m,as_str=True))
x = False
y = True
>>> a,b = Ints('a b')
>>> r,m = prove(a**b == b**a,assume=None,verbose=0)
E: cannot solve !
>>> r is None and m is None
True
"""
if __debug__:
assert not assume or is_expr(assume)
to_prove = claim
if assume:
if __debug__:
is_proved,_ = prove(Not(assume))
def _f():
emsg = "Assumption is alway False!"
if verbose >= 2:
emsg = "{}\n{}".format(assume,emsg)
return emsg
assert is_proved==False, _f()
to_prove = Implies(assume,to_prove)
if verbose >= 2:
print('assume: ')
print(assume)
print('claim: ')
print(claim)
print('to_prove: ')
print(to_prove)
f = Not(to_prove)
models = get_models(f,k=1)
if models is None: #unknown
print('E: cannot solve !')
return None, None
elif models == False: #unsat
return True,None
else: #sat
if __debug__:
assert isinstance(models,list)
if models:
return False, models[0] #the first counterexample
else:
return False, [] #infinite counterexample,models
def get_models(f,k):
"""
Returns the first k models satisfiying f.
If f is not satisfiable, returns False.
If f cannot be solved, returns None
If f is satisfiable, returns the first k models
    Note that if f is a tautology, e.g. True, then the result is []
Based on http://stackoverflow.com/questions/11867611/z3py-checking-all-solutions-for-equation
EXAMPLES:
>>> x, y = Ints('x y')
>>> len(get_models(And(0<=x,x <= 4),k=11))
5
>>> get_models(And(0<=x**y,x <= 1),k=2) is None
True
>>> get_models(And(0<=x,x <= -1),k=2)
False
>>> len(get_models(x+y==7,5))
5
>>> len(get_models(And(x<=5,x>=1),7))
5
>>> get_models(And(x<=0,x>=5),7)
False
>>> x = Bool('x')
>>> get_models(And(x,Not(x)),k=1)
False
>>> get_models(Implies(x,x),k=1)
[]
>>> get_models(BoolVal(True),k=1)
[]
"""
if __debug__:
assert is_expr(f)
assert k>=1
s = Solver()
s.add(f)
models = []
i = 0
while s.check() == sat and i < k:
i = i + 1
m = s.model()
if not m: #if m == []
break
models.append(m)
#create new constraint to block the current model
block = Not(And([v() == m[v] for v in m]))
s.add(block)
if s.check() == unknown:
return None
elif s.check() == unsat and i==0:
return False
else:
return models
def is_tautology(claim,verbose=0):
"""
>>> is_tautology(Implies(Bool('x'),Bool('x')))
True
>>> is_tautology(Implies(Bool('x'),Bool('y')))
False
>>> is_tautology(BoolVal(True))
True
>>> is_tautology(BoolVal(False))
False
"""
return prove(claim=claim,assume=None,verbose=verbose)[0]
def is_contradiction(claim,verbose=0):
"""
>>> x,y=Bools('x y')
>>> is_contradiction(BoolVal(False))
True
>>> is_contradiction(BoolVal(True))
False
>>> is_contradiction(x)
False
>>> is_contradiction(Implies(x,y))
False
>>> is_contradiction(Implies(x,x))
False
>>> is_contradiction(And(x,Not(x)))
True
"""
return prove(claim=Not(claim),assume=None,verbose=verbose)[0]
def exact_one_model(f):
"""
return True if f has exactly 1 model, False otherwise.
EXAMPLES:
>>> x, y = Ints('x y')
>>> exact_one_model(And(0<=x**y,x <= 0))
False
>>> exact_one_model(And(0<=x,x <= 0))
True
>>> exact_one_model(And(0<=x,x <= 1))
False
>>> exact_one_model(And(0<=x,x <= -1))
False
"""
models = get_models(f,k=2)
if isinstance(models,list):
return len(models)==1
else:
return False
def myBinOp(op,*L):
"""
>>> myAnd(*[Bool('x'),Bool('y')])
And(x, y)
>>> myAnd(*[Bool('x'),None])
x
>>> myAnd(*[Bool('x')])
x
>>> myAnd(*[])
>>> myAnd(Bool('x'),Bool('y'))
And(x, y)
>>> myAnd(*[Bool('x'),Bool('y')])
And(x, y)
>>> myAnd([Bool('x'),Bool('y')])
And(x, y)
>>> myAnd((Bool('x'),Bool('y')))
And(x, y)
>>> myAnd(*[Bool('x'),Bool('y'),True])
Traceback (most recent call last):
...
AssertionError
"""
if __debug__:
assert op == Z3_OP_OR or op == Z3_OP_AND or op == Z3_OP_IMPLIES
if len(L)==1 and (isinstance(L[0],list) or isinstance(L[0],tuple)):
L = L[0]
if __debug__:
assert all(not isinstance(l,bool) for l in L)
L = [l for l in L if is_expr(l)]
if L:
if len(L)==1:
return L[0]
else:
if op == Z3_OP_OR:
return Or(L)
elif op == Z3_OP_AND:
return And(L)
else: #IMPLIES
return Implies(L[0],L[1])
else:
return None
def myAnd(*L): return myBinOp(Z3_OP_AND,*L)
def myOr(*L): return myBinOp(Z3_OP_OR,*L)
def myImplies(a,b):return myBinOp(Z3_OP_IMPLIES,[a,b])
Iff = lambda f: And(Implies(f[0],f[1]),Implies(f[1],f[0]))
def model_str(m,as_str=True):
"""
Returned a 'sorted' model (so that it's easier to see)
The model is sorted by its key,
e.g. if the model is y = 3 , x = 10, then the result is
x = 10, y = 3
EXAMPLES:
see doctest exampels from function prove()
"""
if __debug__:
assert m is None or m == [] or isinstance(m,ModelRef)
if m :
vs = [(v,m[v]) for v in m]
vs = sorted(vs,key=lambda a: str(a[0]))
if as_str:
return '\n'.join(['{} = {}'.format(k,v) for (k,v) in vs])
else:
return vs
else:
return str(m) if as_str else m
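if __name__ == "__main__":
    # Hedged smoke test (added example, not part of the upstream module): it just
    # exercises prove() and get_models() the same way the doctests above do.
    x, y = Ints('x y')
    print(prove(Implies(And(x > 0, y > 0), x + y > 0)))   # expect (True, None)
    print(len(get_models(And(0 <= x, x <= 4), k=11)))     # expect 5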
| mit |
cypridina/gloTK | multiproc.py | 1 | 1198 | #!/usr/bin/env python3
# gloTK - Genomes of Luminous Organisms Toolkit
# Copyright (c) 2015-2016 Darrin Schultz. All rights reserved.
#
# This file is part of gloTK.
#
# GloTK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GloTK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GloTK. If not, see <http://www.gnu.org/licenses/>.
"""
import this class whenever you want to do something with argparse or
multiprocessing
"""
#took this gist from here:
# https://gist.github.com/brantfaircloth/1252339
class FullPaths(argparse.Action):
"""Expand user- and relative-paths"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest,
os.path.abspath(os.path.expanduser(values)))
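# Hedged usage sketch (added example): wiring the FullPaths action into an
# argparse parser so that user-supplied paths are expanded at parse time.
def _example_parser():
    parser = argparse.ArgumentParser(description="demo of the FullPaths action")
    parser.add_argument("--outdir", action=FullPaths,
                        help="output directory; '~' and relative paths become absolute")
    return parser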
| gpl-3.0 |
akionakamura/scikit-learn | sklearn/tests/test_lda.py | 71 | 5883 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct values
# for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
| bsd-3-clause |
mrbox/django | tests/gis_tests/rasterapp/test_rasterfield.py | 38 | 3359 | import json
from django.contrib.gis.shortcuts import numpy
from django.core.exceptions import ImproperlyConfigured
from django.test import (
TestCase, TransactionTestCase, mock, skipUnlessDBFeature,
)
from ..data.rasters.textrasters import JSON_RASTER
from ..models import models
from .models import RasterModel
@skipUnlessDBFeature('supports_raster')
class RasterFieldTest(TransactionTestCase):
available_apps = ['gis_tests.rasterapp']
def test_field_null_value(self):
"""
Test creating a model where the RasterField has a null value.
"""
r = RasterModel.objects.create(rast=None)
r.refresh_from_db()
self.assertIsNone(r.rast)
def test_access_band_data_directly_from_queryset(self):
RasterModel.objects.create(rast=JSON_RASTER)
qs = RasterModel.objects.all()
qs[0].rast.bands[0].data()
def test_model_creation(self):
"""
Test RasterField through a test model.
"""
# Create model instance from JSON raster
r = RasterModel.objects.create(rast=JSON_RASTER)
r.refresh_from_db()
# Test raster metadata properties
self.assertEqual((5, 5), (r.rast.width, r.rast.height))
self.assertEqual([0.0, -1.0, 0.0, 0.0, 0.0, 1.0], r.rast.geotransform)
self.assertIsNone(r.rast.bands[0].nodata_value)
# Compare srs
self.assertEqual(r.rast.srs.srid, 4326)
# Compare pixel values
band = r.rast.bands[0].data()
# If numpy, convert result to list
if numpy:
band = band.flatten().tolist()
# Loop through rows in band data and assert single
# value is as expected.
self.assertEqual(
[
0.0, 1.0, 2.0, 3.0, 4.0,
5.0, 6.0, 7.0, 8.0, 9.0,
10.0, 11.0, 12.0, 13.0, 14.0,
15.0, 16.0, 17.0, 18.0, 19.0,
20.0, 21.0, 22.0, 23.0, 24.0
],
band
)
def test_implicit_raster_transformation(self):
"""
Test automatic transformation of rasters with srid different from the
field srid.
"""
# Parse json raster
rast = json.loads(JSON_RASTER)
# Update srid to another value
rast['srid'] = 3086
# Save model and get it from db
r = RasterModel.objects.create(rast=rast)
r.refresh_from_db()
# Confirm raster has been transformed to the default srid
self.assertEqual(r.rast.srs.srid, 4326)
# Confirm geotransform is in lat/lon
self.assertEqual(
r.rast.geotransform,
[-87.9298551266551, 9.459646421449934e-06, 0.0,
23.94249275457565, 0.0, -9.459646421449934e-06]
)
def test_verbose_name_arg(self):
"""
RasterField should accept a positional verbose name argument.
"""
self.assertEqual(
RasterModel._meta.get_field('rast').verbose_name,
'A Verbose Raster Name'
)
@mock.patch('django.contrib.gis.db.models.fields.HAS_GDAL', False)
class RasterFieldWithoutGDALTest(TestCase):
def test_raster_field_without_gdal_exception(self):
msg = 'RasterField requires GDAL.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
models.OriginalRasterField()
| bsd-3-clause |
andrewosh/bolt | bolt/spark/stack.py | 3 | 4932 | from numpy import asarray, ndarray, concatenate
from bolt.spark.utils import zip_with_index
class StackedArray(object):
"""
Wraps a BoltArraySpark and provides an interface for performing
stacked operations (operations on aggregated subarrays). Many methods
will be restricted or forbidden until the Stacked object is
unstacked. Currently, only map() is implemented. The rationale
is that many operations will work faster when vectorized over a
slightly larger array.
The implementation uses an intermediate RDD that collects all
records on a given partition into 'stacked' (key, value) records.
    Here, a key is a 'size'-long tuple of original record keys,
    and a value is an array of the corresponding values,
    concatenated along a new 0th dimension.
"""
_metadata = ['_rdd', '_shape', '_split', '_rekeyed']
def __init__(self, rdd, shape=None, split=None, rekeyed=False):
self._rdd = rdd
self._shape = shape
self._split = split
self._rekeyed = rekeyed
def __finalize__(self, other):
for name in self._metadata:
other_attr = getattr(other, name, None)
if (other_attr is not None) and (getattr(self, name, None) is None):
object.__setattr__(self, name, other_attr)
return self
@property
def shape(self):
return self._shape
@property
def split(self):
return self._split
@property
def rekey(self):
return self._rekeyed
@property
def _constructor(self):
return StackedArray
def stack(self, size):
"""
Make an intermediate RDD where all records are combined into a
list of keys and larger ndarray along a new 0th dimension.
"""
def tostacks(partition):
keys = []
arrs = []
for key, arr in partition:
keys.append(key)
arrs.append(arr)
if size and 0 <= size <= len(keys):
yield (keys, asarray(arrs))
keys, arrs = [], []
if keys:
yield (keys, asarray(arrs))
rdd = self._rdd.mapPartitions(tostacks)
return self._constructor(rdd).__finalize__(self)
def unstack(self):
"""
Unstack array and return a new BoltArraySpark via flatMap().
"""
from bolt.spark.array import BoltArraySpark
if self._rekeyed:
rdd = self._rdd
else:
rdd = self._rdd.flatMap(lambda kv: zip(kv[0], list(kv[1])))
return BoltArraySpark(rdd, shape=self.shape, split=self.split)
def map(self, func):
"""
Apply a function on each subarray.
Parameters
----------
func : function
This is applied to each value in the intermediate RDD.
Returns
-------
StackedArray
"""
vshape = self.shape[self.split:]
x = self._rdd.values().first()
if x.shape == vshape:
a, b = asarray([x]), asarray([x, x])
else:
a, b = x, concatenate((x, x))
try:
atest = func(a)
btest = func(b)
except Exception as e:
raise RuntimeError("Error evaluating function on test array, got error:\n %s" % e)
if not (isinstance(atest, ndarray) and isinstance(btest, ndarray)):
raise ValueError("Function must return ndarray")
# different shapes map to the same new shape
elif atest.shape == btest.shape:
if self._rekeyed is True:
# we've already rekeyed
rdd = self._rdd.map(lambda kv: (kv[0], func(kv[1])))
shape = (self.shape[0],) + atest.shape
else:
# do the rekeying
count, rdd = zip_with_index(self._rdd.values())
rdd = rdd.map(lambda kv: ((kv[1],), func(kv[0])))
shape = (count,) + atest.shape
split = 1
rekeyed = True
# different shapes stay different (along the first dimension)
elif atest.shape[0] == a.shape[0] and btest.shape[0] == b.shape[0]:
shape = self.shape[0:self.split] + atest.shape[1:]
split = self.split
rdd = self._rdd.map(lambda kv: (kv[0], func(kv[1])))
rekeyed = self._rekeyed
else:
raise ValueError("Cannot infer effect of function on shape")
return self._constructor(rdd, rekeyed=rekeyed, shape=shape, split=split).__finalize__(self)
def tordd(self):
"""
Return the RDD wrapped by the StackedArray.
Returns
-------
RDD
"""
return self._rdd
def __str__(self):
s = "Stacked BoltArray\n"
s += "shape: %s\n" % str(self.shape)
return s
def __repr__(self):
return str(self)
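# Hedged usage sketch (added example): bolt_array is assumed to be a
# bolt.spark.array.BoltArraySpark exposing tordd(), shape and split (the same
# attributes unstack() relies on above); running it requires a live SparkContext.
def _example_stacked_map(bolt_array, func, size=10):
    # wrap the array's RDD, group records into stacks of up to ``size`` rows,
    # apply a vectorized function per stack, then flatten back to a BoltArraySpark
    stacked = StackedArray(bolt_array.tordd(), shape=bolt_array.shape,
                           split=bolt_array.split).stack(size)
    return stacked.map(func).unstack()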
| apache-2.0 |
SurveyMan/SMPy | surveyman/examples/example_survey.py | 1 | 6639 | # -*- coding: cp1252 -*-
#example survey based on https://github.com/etosch/SurveyMan/blob/master/data/Ipierotis.csv
#outputs JSON representation
import surveyman.survey.questions as questions
import surveyman.survey.blocks as blocks
import surveyman.survey.constraints as constraints
import surveyman.survey.options as options
import surveyman.survey.surveys as surveys
def create_survey():
oneof = "oneof"
#question 1
q1 = questions.Question(oneof, "What is your gender?", [])
q1.add_option("Male")
q1.add_option("Female")
q1.add_option("Other")
#print q1
#question 2
q2 = questions.Question(oneof, "What is your year of birth?", [options.Option(str(x)) for x in range(1950, 1996)])
#print q2
#question 3
q3 = questions.Question(oneof, "Which of the following best describes your highest achieved education level?", [])
q3.add_option("Some High School")
q3.add_option("High School Graduate")
q3.add_option("Some College, no Degree")
q3.add_option("Associates Degree")
q3.add_option("Bachelors Degree")
q3.add_option("Graduate Degree, Masters")
q3.add_option("Graduate Degree, Doctorate")
#print q3
#question 4
q4 = questions.Question(oneof, "What is the total income of your household?", [])
q4.add_option("Less than $10,000")
q4.add_option("$10,000 - $14,999")
q4.add_option("$15,000 - $24,999")
q4.add_option("$25,000 - $39,499")
q4.add_option("$40,500 - $59,999")
q4.add_option("$60,000 - $74,999")
q4.add_option("$75,000 - $99,999")
q4.add_option("$100,000 - $149,999")
q4.add_option("More than $150,000")
#print q4
#question 5
q5 = questions.Question(oneof, "What is your marital status?", [])
q5.add_option("Cohabitating")
q5.add_option("Divorced")
q5.add_option("Engaged")
q5.add_option("Married")
q5.add_option("Separated")
q5.add_option("Single")
q5.add_option("Widowed")
#print q5
#question 6
q6 = questions.Question(oneof, "Do you have children?", [])
q6.add_option("No children")
q6.add_option("Yes, 1 child")
q6.add_option("Yes, 2 children")
q6.add_option("Yes, 3 children")
q6.add_option("Yes, 4 children")
#print q6
#question 7
q7 = questions.Question(oneof, "How many members in your household?", [options.Option(str(x)) for x in range(1, 4)])
#print q7
#question 8
q8 = questions.Question(oneof, "In which country do you live?", [])
q8.add_option("United States")
q8.add_option("India")
q8.add_option("Other")
#print q8
#question 9
q9 = questions.Question(oneof, "Please indicate your race.", [])
q9.add_option("American Indian or Alaska Native")
q9.add_option("Asian")
q9.add_option("Black Latino")
q9.add_option("Black or African American")
q9.add_option("Native Hawaiian or Other Pacific Islander")
q9.add_option("White Latino")
q9.add_option("White")
q9.add_option("2 or more races")
q9.add_option("Unknown")
#print q9
#question 10
q10 = questions.Question(oneof, "Why do you complete tasks in Mechanical Turk? Please check any of the following that applies:", [])
q10.add_option("Fruitful way to spend free time and get some cash (e.g., instead of watching TV).")
q10.add_option("For primary income purposes (e.g., gas, bills, groceries, credit cards).")
q10.add_option("For secondary income purposes, pocket change (for hobbies, gadgets, going out).")
q10.add_option("To kill time.")
q10.add_option("I find the tasks to be fun.")
q10.add_option("I am currently unemployed, or have only a part time job.")
#question 11
q11 = questions.Question(oneof, "Has the recession affected your decision to participate on MTurk?", [])
q11.add_option("Yes")
q11.add_option("No")
#question 12
q12 = questions.Question(oneof, "Has the recession affected your level of participation on MTurk?", [])
q12.add_option("Yes")
q12.add_option("No")
#question 13
q13 = questions.Question(oneof, "For how long have you been working on Amazon Mechanical Turk?", [])
q13.add_option("< 6 mos.")
q13.add_option("6mos-1yr")
q13.add_option("1-2yrs")
q13.add_option("2-3yrs")
q13.add_option("3-5yrs")
q13.add_option("5-7yrs")
q13.add_option("7-9yrs")
q13.add_option("9-15yrs")
q13.add_option("15+")
#question 14
q14 = questions.Question(oneof, "How much do you earn per week on Mechanical Turk?", [])
q14.add_option("Less than $1 per week")
q14.add_option("$1-$5 per week.")
q14.add_option("$5-$10 per week.")
q14.add_option("$10-$20 per week.")
q14.add_option("$20-$50 per week.")
q14.add_option("$50-$100 per week.")
q14.add_option("$100-$200 per week.")
q14.add_option("$200-$500 per week.")
q14.add_option("More than $500 per week.")
#question 15
q15 = questions.Question(oneof, "How much time do you spend per week on Mechanical Turk?", [])
q15.add_option("Less than 1 hour per week.")
q15.add_option("1-2 hours per week.")
q15.add_option("2-4 hours per week.")
q15.add_option("4-8 hours per week.")
q15.add_option("8-20 hours per week.")
q15.add_option("20-40 hours per week.")
q15.add_option("More than 40 hours per week.")
#question 16
q16 = questions.Question(oneof, "How many HITs do you complete per week on Mechanical Turk?", [])
q16.add_option("Less than 1 HIT per week.")
q16.add_option("1-5 HITs per week.")
q16.add_option("5-10 HITs per week.")
q15.add_option("10-20 HITs per week.")
q16.add_option("20-50 HITs per week.")
q16.add_option("50-100 HITs per week.")
q16.add_option("100-200 HITs per week.")
q16.add_option("200-500 HITs per week.")
q16.add_option("500-1000 HITs per week.")
q16.add_option("1000-5000 HITs per week.")
q16.add_option("More than 5000 HITs per week.")
q17 = questions.Question(oneof, "In which state do you live?", [])
q17.add_option("Massachusetts")
q17.add_option("some other state (too many to list)")
block1 = blocks.Block([q1, q2, q3, q4, q5, q6, q7, q8, q9])
block2 = blocks.Block([q17])
block3 = blocks.Block([q10, q11, q12, q13, q14, q15, q16])
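    #branching: choosing "United States" (option 0) for q8 leads to the state
    #question in block2, while "India" and "Other" (options 1 and 2) skip to block3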
branch1 = constraints.Constraint(q8)
branch1.add_branch_by_index(0, block2)
branch1.add_branch_by_index(1, block3)
branch1.add_branch_by_index(2, block3)
#print str(branch1)
survey = surveys.Survey([block1, block2, block3], [branch1])
## jsonfile = open("survey1.json", "wb")
## jsonfile.write(survey.jsonize())
## jsonfile.close()
return survey | apache-2.0 |
vivyly/fancast | fancast/casting/serializers.py | 1 | 2476 | #!/usr/bin/env python
from rest_framework import serializers
from .models import (Project,
Character,
Prospect,
ProspectVote,
Actor)
class ProjectSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Project
fields = ('slug', 'origin', 'derivation',
'origin_title', 'derived_title' )
class ActorSerializer(serializers.ModelSerializer):
class Meta:
model = Actor
fields = ('slug', 'name', 'normalized',
'image', 'description')
class ProspectVoteSerializer(serializers.ModelSerializer):
class Meta:
model = ProspectVote
fields = ('sessionid', 'vote_status')
class ProspectSerializer(serializers.ModelSerializer):
actor = ActorSerializer()
upvotes = serializers.SerializerMethodField('get_upvotes')
downvotes = serializers.SerializerMethodField('get_downvotes')
total = serializers.SerializerMethodField('get_total')
has_upvoted = serializers.SerializerMethodField('get_has_upvoted')
has_downvoted = serializers.SerializerMethodField('get_has_downvoted')
class Meta:
model = Prospect
fields = ('slug', 'actor', 'upvotes', 'total', 'downvotes', 'has_upvoted', 'has_downvoted')
def get_upvotes(self, obj):
return obj.upvotes
def get_downvotes(self, obj):
return obj.downvotes
def get_has_upvoted(self, obj):
try:
request = self.context['request']
sessionid = request.COOKIES.get('sessionid')
except KeyError:
sessionid = ''
return any(ProspectVote.objects.filter(sessionid=sessionid,
vote_status=1, prospect=obj))
def get_has_downvoted(self, obj):
try:
request = self.context['request']
sessionid = request.COOKIES.get('sessionid')
except KeyError:
sessionid = ''
return any(ProspectVote.objects.filter(sessionid=sessionid,
vote_status=-1, prospect=obj))
def get_total(self, obj):
return obj.totalvotes
class CharacterSerializer(serializers.ModelSerializer):
prospects = ProspectSerializer(many=True)
class Meta:
model = Character
fields = ('slug', 'name', 'normalized', 'image',
'description', 'project', 'order', 'prospects')
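# Hedged usage sketch (`character` and `request` are hypothetical objects from a DRF view;
# the request is passed in context so the session-cookie vote checks above can run):
#   data = CharacterSerializer(character, context={'request': request}).data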
| bsd-3-clause |
egoid/baytree | lib/python2.7/site-packages/django/contrib/gis/utils/ogrinspect.py | 391 | 9090 | """
This module is for inspecting OGR data sources and generating either
models for GeoDjango and/or mapping dictionaries for use with the
`LayerMapping` utility.
"""
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString,
OFTTime,
)
from django.utils import six
from django.utils.six.moves import zip
def mapping(data_source, geom_name='geom', layer_key=0, multi_geom=False):
"""
Given a DataSource, generates a dictionary that may be used
for invoking the LayerMapping utility.
Keyword Arguments:
`geom_name` => The name of the geometry field to use for the model.
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
"""
if isinstance(data_source, six.string_types):
# Instantiating the DataSource from the string.
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Creating the dictionary.
_mapping = {}
# Generating the field name for each field in the layer.
for field in data_source[layer_key].fields:
mfield = field.lower()
if mfield[-1:] == '_':
mfield += 'field'
_mapping[mfield] = field
gtype = data_source[layer_key].geom_type
if multi_geom:
gtype.to_multi()
_mapping[geom_name] = str(gtype).upper()
return _mapping
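# Hedged example of `mapping` (the shapefile path and field names are hypothetical):
# >>> mapping('/data/world_borders.shp', geom_name='geom', multi_geom=True)
# {'fips': 'FIPS', 'name': 'NAME', 'geom': 'MULTIPOLYGON'}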
def ogrinspect(*args, **kwargs):
"""
Given a data source (either a string or a DataSource object) and a string
model name this function will generate a GeoDjango model.
Usage:
>>> from django.contrib.gis.utils import ogrinspect
>>> ogrinspect('/path/to/shapefile.shp','NewModel')
    ...will print model definition to stdout
or put this in a python script and use to redirect the output to a new
model like:
$ python generate_model.py > myapp/models.py
# generate_model.py
from django.contrib.gis.utils import ogrinspect
shp_file = 'data/mapping_hacks/world_borders.shp'
model_name = 'WorldBorders'
print(ogrinspect(shp_file, model_name, multi_geom=True, srid=4326,
geom_name='shapes', blank=True))
Required Arguments
`datasource` => string or DataSource object to file pointer
`model name` => string of name of new model class to create
Optional Keyword Arguments
`geom_name` => For specifying the model name for the Geometry Field.
Otherwise will default to `geom`
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`srid` => The SRID to use for the Geometry Field. If it can be determined,
the SRID of the datasource is used.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
`name_field` => String - specifies a field name to return for the
`__unicode__`/`__str__` function (which will be generated if specified).
`imports` => Boolean (default: True) - set to False to omit the
`from django.contrib.gis.db import models` code from the
autogenerated models thus avoiding duplicated imports when building
more than one model by batching ogrinspect()
`decimal` => Boolean or sequence (default: False). When set to True
all generated model fields corresponding to the `OFTReal` type will
be `DecimalField` instead of `FloatField`. A sequence of specific
field names to generate as `DecimalField` may also be used.
`blank` => Boolean or sequence (default: False). When set to True all
generated model fields will have `blank=True`. If the user wants to
give specific fields to have blank, then a list/tuple of OGR field
names may be used.
`null` => Boolean (default: False) - When set to True all generated
       model fields will have `null=True`. If the user wants to
give specific fields to have null, then a list/tuple of OGR field
names may be used.
Note: This routine calls the _ogrinspect() helper to do the heavy lifting.
"""
return '\n'.join(s for s in _ogrinspect(*args, **kwargs))
def _ogrinspect(data_source, model_name, geom_name='geom', layer_key=0, srid=None,
multi_geom=False, name_field=None, imports=True,
decimal=False, blank=False, null=False):
"""
Helper routine for `ogrinspect` that generates GeoDjango models corresponding
to the given data source. See the `ogrinspect` docstring for more details.
"""
# Getting the DataSource
if isinstance(data_source, six.string_types):
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Getting the layer corresponding to the layer key and getting
# a string listing of all OGR fields in the Layer.
layer = data_source[layer_key]
ogr_fields = layer.fields
# Creating lists from the `null`, `blank`, and `decimal`
# keyword arguments.
def process_kwarg(kwarg):
if isinstance(kwarg, (list, tuple)):
return [s.lower() for s in kwarg]
elif kwarg:
return [s.lower() for s in ogr_fields]
else:
return []
null_fields = process_kwarg(null)
blank_fields = process_kwarg(blank)
decimal_fields = process_kwarg(decimal)
# Gets the `null` and `blank` keywords for the given field name.
def get_kwargs_str(field_name):
kwlist = []
if field_name.lower() in null_fields:
kwlist.append('null=True')
if field_name.lower() in blank_fields:
kwlist.append('blank=True')
if kwlist:
return ', ' + ', '.join(kwlist)
else:
return ''
# For those wishing to disable the imports.
if imports:
yield '# This is an auto-generated Django model module created by ogrinspect.'
yield 'from django.contrib.gis.db import models'
yield ''
yield 'class %s(models.Model):' % model_name
for field_name, width, precision, field_type in zip(
ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types):
# The model field name.
mfield = field_name.lower()
if mfield[-1:] == '_':
mfield += 'field'
# Getting the keyword args string.
kwargs_str = get_kwargs_str(field_name)
if field_type is OFTReal:
# By default OFTReals are mapped to `FloatField`, however, they
# may also be mapped to `DecimalField` if specified in the
# `decimal` keyword.
if field_name.lower() in decimal_fields:
yield ' %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)' % (
mfield, width, precision, kwargs_str
)
else:
yield ' %s = models.FloatField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTInteger:
yield ' %s = models.IntegerField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTInteger64:
yield ' %s = models.BigIntegerField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTString:
yield ' %s = models.CharField(max_length=%s%s)' % (mfield, width, kwargs_str)
elif field_type is OFTDate:
yield ' %s = models.DateField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTDateTime:
yield ' %s = models.DateTimeField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTTime:
yield ' %s = models.TimeField(%s)' % (mfield, kwargs_str[2:])
else:
raise TypeError('Unknown field type %s in %s' % (field_type, mfield))
# TODO: Autodetection of multigeometry types (see #7218).
gtype = layer.geom_type
if multi_geom:
gtype.to_multi()
geom_field = gtype.django
# Setting up the SRID keyword string.
if srid is None:
if layer.srs is None:
srid_str = 'srid=-1'
else:
srid = layer.srs.srid
if srid is None:
srid_str = 'srid=-1'
elif srid == 4326:
# WGS84 is already the default.
srid_str = ''
else:
srid_str = 'srid=%s' % srid
else:
srid_str = 'srid=%s' % srid
yield ' %s = models.%s(%s)' % (geom_name, geom_field, srid_str)
if name_field:
yield ''
yield ' def __%s__(self): return self.%s' % (
'str' if six.PY3 else 'unicode', name_field)
| mit |
anandpdoshi/erpnext | erpnext/accounts/doctype/cheque_print_template/cheque_print_template.py | 2 | 3767 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
class ChequePrintTemplate(Document):
pass
@frappe.whitelist()
def create_or_update_cheque_print_format(template_name):
if not frappe.db.exists("Print Format", template_name):
cheque_print = frappe.new_doc("Print Format")
cheque_print.update({
"doc_type": "Payment Entry",
"standard": "No",
"custom_format": 1,
"print_format_type": "Server",
"name": template_name
})
else:
cheque_print = frappe.get_doc("Print Format", template_name)
doc = frappe.get_doc("Cheque Print Template", template_name)
cheque_print.html = """
<div style="position: relative; top:%(starting_position_from_top_edge)scm">
<div style="width:%(cheque_width)scm;height:%(cheque_height)scm;">
<span style="top: {{ %(acc_pay_dist_from_top_edge)s }}cm; left: {{ %(acc_pay_dist_from_left_edge)s }}cm;
border-bottom: solid 1px;border-top:solid 1px; position: absolute;">
%(message_to_show)s
</span>
<span style="top:%(date_dist_from_top_edge)s cm; left:%(date_dist_from_left_edge)scm;
position: absolute;">
{{doc.reference_date or '' }}
</span>
<span style="top:%(acc_no_dist_from_top_edge)scm;left:%(acc_no_dist_from_left_edge)scm;
position: absolute;">
{{ doc.account_no or '' }}
</span>
<span style="top:%(payer_name_from_top_edge)scm;left: %(payer_name_from_left_edge)scm;
position: absolute;">
{{doc.party}}
</span>
<span style="top:%(amt_in_words_from_top_edge)scm; left:%(amt_in_words_from_left_edge)scm;
position: absolute; display: block; width: %(amt_in_word_width)scm;
line-height:%(amt_in_words_line_spacing)scm; word-wrap: break-word;">
{{frappe.utils.money_in_words(doc.base_paid_amount or doc.base_received_amount)}}
</span>
<span style="top:%(amt_in_figures_from_top_edge)scm;left: %(amt_in_figures_from_left_edge)scm;
position: absolute;">
{{doc.get_formatted("base_paid_amount") or doc.get_formatted("base_received_amount")}}
</span>
<span style="top:%(signatory_from_top_edge)scm;left: %(signatory_from_left_edge)scm;
position: absolute;">
{{doc.company}}
</span>
</div>
</div>"""%{
"starting_position_from_top_edge": doc.starting_position_from_top_edge \
if doc.cheque_size == "A4" else 0.0,
"cheque_width": doc.cheque_width, "cheque_height": doc.cheque_height,
"acc_pay_dist_from_top_edge": doc.acc_pay_dist_from_top_edge,
"acc_pay_dist_from_left_edge": doc.acc_pay_dist_from_left_edge,
"message_to_show": doc.message_to_show if doc.message_to_show else _("Account Pay Only"),
"date_dist_from_top_edge": doc.date_dist_from_top_edge,
"date_dist_from_left_edge": doc.date_dist_from_left_edge,
"acc_no_dist_from_top_edge": doc.acc_no_dist_from_top_edge,
"acc_no_dist_from_left_edge": doc.acc_no_dist_from_left_edge,
"payer_name_from_top_edge": doc.payer_name_from_top_edge,
"payer_name_from_left_edge": doc.payer_name_from_left_edge,
"amt_in_words_from_top_edge": doc.amt_in_words_from_top_edge,
"amt_in_words_from_left_edge": doc.amt_in_words_from_left_edge,
"amt_in_word_width": doc.amt_in_word_width,
"amt_in_words_line_spacing": doc.amt_in_words_line_spacing,
"amt_in_figures_from_top_edge": doc.amt_in_figures_from_top_edge,
"amt_in_figures_from_left_edge": doc.amt_in_figures_from_left_edge,
"signatory_from_top_edge": doc.signatory_from_top_edge,
"signatory_from_left_edge": doc.signatory_from_left_edge
}
cheque_print.save(ignore_permissions=True)
frappe.db.set_value("Cheque Print Template", template_name, "has_print_format", 1)
return cheque_print
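# Hedged usage sketch (the template name is hypothetical; a matching
# "Cheque Print Template" document must already exist):
#   create_or_update_cheque_print_format("HDFC Bank Cheque")
# The function is whitelisted, so it can also be invoked from the client via frappe.call.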
| agpl-3.0 |
madtypist/madtypist.github.io | .bundle/gems/pygments.rb-0.6.0/vendor/simplejson/simplejson/scanner.py | 674 | 2560 | """JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
from simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
c_make_scanner = _import_c_make_scanner()
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
object_pairs_hook = context.object_pairs_hook
memo = context.memo
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict,
_scan_once, object_hook, object_pairs_hook, memo)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
def scan_once(string, idx):
try:
return _scan_once(string, idx)
finally:
memo.clear()
return scan_once
make_scanner = c_make_scanner or py_make_scanner
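# Hedged usage sketch: the scanner is normally constructed by simplejson's JSONDecoder,
# which passes itself as `context` (supplying parse_object, parse_string, memo, etc.):
# >>> import simplejson
# >>> simplejson.loads('{"a": [1, 2.5, true, null]}')
# {'a': [1, 2.5, True, None]}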
| mit |
nteplov/example-plugin | shadowsocks/crypto/openssl.py | 1038 | 5414 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
__all__ = ['ciphers']
libcrypto = None
loaded = False
buf_size = 2048
def load_openssl():
global loaded, libcrypto, buf
libcrypto = util.find_library(('crypto', 'eay32'),
'EVP_get_cipherbyname',
'libcrypto')
if libcrypto is None:
raise Exception('libcrypto(OpenSSL) not found')
libcrypto.EVP_get_cipherbyname.restype = c_void_p
libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p
libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
c_char_p, c_char_p, c_int)
libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
c_char_p, c_int)
libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
libcrypto.OpenSSL_add_all_ciphers()
buf = create_string_buffer(buf_size)
loaded = True
def load_cipher(cipher_name):
func_name = 'EVP_' + cipher_name.replace('-', '_')
if bytes != str:
func_name = str(func_name, 'utf-8')
cipher = getattr(libcrypto, func_name, None)
if cipher:
cipher.restype = c_void_p
return cipher()
return None
class OpenSSLCrypto(object):
def __init__(self, cipher_name, key, iv, op):
self._ctx = None
if not loaded:
load_openssl()
cipher_name = common.to_bytes(cipher_name)
cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
if not cipher:
cipher = load_cipher(cipher_name)
if not cipher:
raise Exception('cipher %s not found in libcrypto' % cipher_name)
key_ptr = c_char_p(key)
iv_ptr = c_char_p(iv)
self._ctx = libcrypto.EVP_CIPHER_CTX_new()
if not self._ctx:
raise Exception('can not create cipher context')
r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
key_ptr, iv_ptr, c_int(op))
if not r:
self.clean()
raise Exception('can not initialize cipher context')
def update(self, data):
global buf_size, buf
cipher_out_len = c_long(0)
l = len(data)
if buf_size < l:
buf_size = l * 2
buf = create_string_buffer(buf_size)
libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
byref(cipher_out_len), c_char_p(data), l)
# buf is copied to a str object when we access buf.raw
return buf.raw[:cipher_out_len.value]
def __del__(self):
self.clean()
def clean(self):
if self._ctx:
libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
libcrypto.EVP_CIPHER_CTX_free(self._ctx)
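# Hedged round-trip sketch (key/iv sizes mirror the aes-256-cfb entry and the tests below):
#   enc = OpenSSLCrypto('aes-256-cfb', b'k' * 32, b'i' * 16, 1)  # op=1 encrypts
#   dec = OpenSSLCrypto('aes-256-cfb', b'k' * 32, b'i' * 16, 0)  # op=0 decrypts
#   assert dec.update(enc.update(b'hello')) == b'hello'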
ciphers = {
'aes-128-cfb': (16, 16, OpenSSLCrypto),
'aes-192-cfb': (24, 16, OpenSSLCrypto),
'aes-256-cfb': (32, 16, OpenSSLCrypto),
'aes-128-ofb': (16, 16, OpenSSLCrypto),
'aes-192-ofb': (24, 16, OpenSSLCrypto),
'aes-256-ofb': (32, 16, OpenSSLCrypto),
'aes-128-ctr': (16, 16, OpenSSLCrypto),
'aes-192-ctr': (24, 16, OpenSSLCrypto),
'aes-256-ctr': (32, 16, OpenSSLCrypto),
'aes-128-cfb8': (16, 16, OpenSSLCrypto),
'aes-192-cfb8': (24, 16, OpenSSLCrypto),
'aes-256-cfb8': (32, 16, OpenSSLCrypto),
'aes-128-cfb1': (16, 16, OpenSSLCrypto),
'aes-192-cfb1': (24, 16, OpenSSLCrypto),
'aes-256-cfb1': (32, 16, OpenSSLCrypto),
'bf-cfb': (16, 8, OpenSSLCrypto),
'camellia-128-cfb': (16, 16, OpenSSLCrypto),
'camellia-192-cfb': (24, 16, OpenSSLCrypto),
'camellia-256-cfb': (32, 16, OpenSSLCrypto),
'cast5-cfb': (16, 8, OpenSSLCrypto),
'des-cfb': (8, 8, OpenSSLCrypto),
'idea-cfb': (16, 8, OpenSSLCrypto),
'rc2-cfb': (16, 8, OpenSSLCrypto),
'rc4': (16, 0, OpenSSLCrypto),
'seed-cfb': (16, 16, OpenSSLCrypto),
}
def run_method(method):
cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
def test_aes_128_cfb():
run_method('aes-128-cfb')
def test_aes_256_cfb():
run_method('aes-256-cfb')
def test_aes_128_cfb8():
run_method('aes-128-cfb8')
def test_aes_256_ofb():
run_method('aes-256-ofb')
def test_aes_256_ctr():
run_method('aes-256-ctr')
def test_bf_cfb():
run_method('bf-cfb')
def test_rc4():
run_method('rc4')
if __name__ == '__main__':
test_aes_128_cfb()
| apache-2.0 |
rbarrois/djadmin_export | tests/runner.py | 1 | 1126 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2013 Raphaël Barrois
# This code is distributed under the two-clause BSD license.
from __future__ import unicode_literals
import sys
import django
from django.conf import settings
from django.test.runner import DiscoverRunner as DjangoTestSuiteRunner
if not settings.configured:
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'tests',
'djadmin_export',
],
MIDDLEWARE_CLASSES=[],
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}],
)
django.setup()
default_test_args = 'tests.utils'
def runtests(*test_args):
if not test_args:
test_args = [default_test_args]
runner = DjangoTestSuiteRunner(failfast=False)
failures = runner.run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
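# Typical invocations (the specific module path below is only an example):
#   $ python tests/runner.py              # runs the default 'tests.utils' suite
#   $ python tests/runner.py tests.other  # runs a specific test module instead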
| lgpl-3.0 |
mitocw/edx-platform | openedx/core/djangoapps/content_libraries/api.py | 3 | 33134 | """
Python API for content libraries.
Via 'views.py', most of these API methods are also exposed as a REST API.
The API methods in this file are focused on authoring and specific to content
libraries; they wouldn't necessarily apply or work in other learning contexts
such as courses, blogs, "pathways," etc.
** As this is an authoring-focused API, all API methods in this file deal with
the DRAFT version of the content library. **
Some of these methods will work and may be used from the LMS if needed (mostly
for test setup; other use is discouraged), but some of the implementation
details rely on Studio so other methods will raise errors if called from the
LMS. (The REST API is not available at all from the LMS.)
Any APIs that use/affect content libraries but are generic enough to work in
other learning contexts too are in the core XBlock python/REST API at
openedx.core.djangoapps.xblock.api/rest_api
For example, to render a content library XBlock as HTML, one can use the generic
render_block_view(block, view_name, user)
API in openedx.core.djangoapps.xblock.api (use it from Studio for the draft
version, from the LMS for published version).
There are one or two methods in this file that have some overlap with the core
XBlock API; for example, this content library API provides a get_library_block()
which returns metadata about an XBlock; it's in this API because it also returns
data about whether or not the XBlock has unpublished edits, which is an
authoring-only concern. Likewise, APIs for getting/setting an individual
XBlock's OLX directly seem more appropriate for small, reusable components in
content libraries and may not be appropriate for other learning contexts so they
are implemented here in the library API only. In the future, if we find a need
for these in most other learning contexts then those methods could be promoted
to the core XBlock API and made generic.
"""
from uuid import UUID
import logging
import attr
from django.contrib.auth.models import AbstractUser, Group
from django.core.exceptions import PermissionDenied
from django.core.validators import validate_unicode_slug
from django.db import IntegrityError
from lxml import etree
from opaque_keys.edx.keys import LearningContextKey
from opaque_keys.edx.locator import BundleDefinitionLocator, LibraryLocatorV2, LibraryUsageLocatorV2
from organizations.models import Organization
import six
from xblock.core import XBlock
from xblock.exceptions import XBlockNotFoundError
from openedx.core.djangoapps.content_libraries import permissions
from openedx.core.djangoapps.content_libraries.library_bundle import LibraryBundle
from openedx.core.djangoapps.content_libraries.models import ContentLibrary, ContentLibraryPermission
from openedx.core.djangoapps.xblock.api import get_block_display_name, load_block
from openedx.core.djangoapps.xblock.learning_context.manager import get_learning_context_impl
from openedx.core.djangoapps.xblock.runtime.olx_parsing import XBlockInclude
from openedx.core.lib.blockstore_api import (
get_bundle,
get_bundle_file_data,
get_bundle_files,
get_or_create_bundle_draft,
create_bundle,
update_bundle,
delete_bundle,
write_draft_file,
set_draft_link,
commit_draft,
delete_draft,
)
from openedx.core.djangolib import blockstore_cache
from openedx.core.djangolib.blockstore_cache import BundleCache
log = logging.getLogger(__name__)
# This API is only used in Studio, so we always work with this draft of any
# content library bundle:
DRAFT_NAME = 'studio_draft'
# Exceptions:
ContentLibraryNotFound = ContentLibrary.DoesNotExist
class ContentLibraryBlockNotFound(XBlockNotFoundError):
""" XBlock not found in the content library """
class LibraryAlreadyExists(KeyError):
""" A library with the specified slug already exists """
class LibraryBlockAlreadyExists(KeyError):
""" An XBlock with that ID already exists in the library """
class InvalidNameError(ValueError):
""" The specified name/identifier is not valid """
# Models:
@attr.s
class ContentLibraryMetadata:
"""
Class that represents the metadata about a content library.
"""
key = attr.ib(type=LibraryLocatorV2)
bundle_uuid = attr.ib(type=UUID)
title = attr.ib("")
description = attr.ib("")
version = attr.ib(0)
has_unpublished_changes = attr.ib(False)
# has_unpublished_deletes will be true when the draft version of the library's bundle
# contains deletes of any XBlocks that were in the most recently published version
has_unpublished_deletes = attr.ib(False)
# Allow any user (even unregistered users) to view and interact directly
# with this library's content in the LMS
allow_public_learning = attr.ib(False)
# Allow any user with Studio access to view this library's content in
# Studio, use it in their courses, and copy content out of this library.
allow_public_read = attr.ib(False)
class AccessLevel:
""" Enum defining library access levels/permissions """
ADMIN_LEVEL = ContentLibraryPermission.ADMIN_LEVEL
AUTHOR_LEVEL = ContentLibraryPermission.AUTHOR_LEVEL
READ_LEVEL = ContentLibraryPermission.READ_LEVEL
NO_ACCESS = None
@attr.s
class ContentLibraryPermissionEntry:
"""
A user or group granted permission to use a content library.
"""
user = attr.ib(type=AbstractUser, default=None)
group = attr.ib(type=Group, default=None)
access_level = attr.ib(AccessLevel.NO_ACCESS)
@attr.s
class LibraryXBlockMetadata:
"""
Class that represents the metadata about an XBlock in a content library.
"""
usage_key = attr.ib(type=LibraryUsageLocatorV2)
def_key = attr.ib(type=BundleDefinitionLocator)
display_name = attr.ib("")
has_unpublished_changes = attr.ib(False)
@attr.s
class LibraryXBlockStaticFile:
"""
Class that represents a static file in a content library, associated with
a particular XBlock.
"""
# File path e.g. "diagram.png"
# In some rare cases it might contain a folder part, e.g. "en/track1.srt"
path = attr.ib("")
# Publicly accessible URL where the file can be downloaded
url = attr.ib("")
# Size in bytes
size = attr.ib(0)
@attr.s
class LibraryXBlockType:
"""
An XBlock type that can be added to a content library
"""
block_type = attr.ib("")
display_name = attr.ib("")
@attr.s
class LibraryBundleLink:
"""
A link from a content library blockstore bundle to another blockstore bundle
"""
# Bundle that is linked to
bundle_uuid = attr.ib(type=UUID)
# Link name (slug)
id = attr.ib("")
# What version of this bundle we are currently linking to.
version = attr.ib(0)
# What the latest version of the linked bundle is:
# (if latest_version > version), the link can be "updated" to the latest version.
latest_version = attr.ib(0)
# Opaque key: If the linked bundle is a library or other learning context whose opaque key we can deduce, then this
# is the key. If we don't know what type of blockstore bundle this link is pointing to, then this is blank.
opaque_key = attr.ib(type=LearningContextKey, default=None)
def list_libraries_for_user(user):
"""
Lists up to 50 content libraries that the user has permission to view.
This method makes at least one HTTP call per library so should only be used
for development until we have something more efficient.
"""
qs = ContentLibrary.objects.all()
filtered_qs = permissions.perms[permissions.CAN_VIEW_THIS_CONTENT_LIBRARY].filter(user, qs)
return [get_library(ref.library_key) for ref in filtered_qs[:50]]
def require_permission_for_library_key(library_key, user, permission):
"""
Given any of the content library permission strings defined in
openedx.core.djangoapps.content_libraries.permissions,
check if the given user has that permission for the library with the
specified library ID.
Raises django.core.exceptions.PermissionDenied if the user doesn't have
permission.
"""
assert isinstance(library_key, LibraryLocatorV2)
library_obj = ContentLibrary.objects.get_by_key(library_key)
if not user.has_perm(permission, obj=library_obj):
raise PermissionDenied
def get_library(library_key):
"""
Get the library with the specified key. Does not check permissions.
returns a ContentLibraryMetadata instance.
Raises ContentLibraryNotFound if the library doesn't exist.
"""
assert isinstance(library_key, LibraryLocatorV2)
ref = ContentLibrary.objects.get_by_key(library_key)
bundle_metadata = get_bundle(ref.bundle_uuid)
lib_bundle = LibraryBundle(library_key, ref.bundle_uuid, draft_name=DRAFT_NAME)
(has_unpublished_changes, has_unpublished_deletes) = lib_bundle.has_changes()
return ContentLibraryMetadata(
key=library_key,
bundle_uuid=ref.bundle_uuid,
title=bundle_metadata.title,
description=bundle_metadata.description,
version=bundle_metadata.latest_version,
allow_public_learning=ref.allow_public_learning,
allow_public_read=ref.allow_public_read,
has_unpublished_changes=has_unpublished_changes,
has_unpublished_deletes=has_unpublished_deletes,
)
def create_library(collection_uuid, org, slug, title, description, allow_public_learning, allow_public_read):
"""
Create a new content library.
org: an organizations.models.Organization instance
slug: a slug for this library like 'physics-problems'
title: title for this library
description: description of this library
allow_public_learning: Allow anyone to read/learn from blocks in the LMS
allow_public_read: Allow anyone to view blocks (including source) in Studio?
Returns a ContentLibraryMetadata instance.
"""
assert isinstance(collection_uuid, UUID)
assert isinstance(org, Organization)
validate_unicode_slug(slug)
# First, create the blockstore bundle:
bundle = create_bundle(
collection_uuid,
slug=slug,
title=title,
description=description,
)
# Now create the library reference in our database:
try:
ref = ContentLibrary.objects.create(
org=org,
slug=slug,
bundle_uuid=bundle.uuid,
allow_public_learning=allow_public_learning,
allow_public_read=allow_public_read,
)
except IntegrityError:
delete_bundle(bundle.uuid)
raise LibraryAlreadyExists(slug)
return ContentLibraryMetadata(
key=ref.library_key,
bundle_uuid=bundle.uuid,
title=title,
description=description,
version=0,
allow_public_learning=ref.allow_public_learning,
allow_public_read=ref.allow_public_read,
)
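# Hedged usage sketch (all identifiers below are hypothetical):
#   org = Organization.objects.get(short_name="MITx")
#   lib = create_library(
#       collection_uuid=some_blockstore_collection_uuid, org=org, slug="physics-problems",
#       title="Physics Problems", description="Shared problem bank",
#       allow_public_learning=False, allow_public_read=True,
#   )
#   lib.key  # -> a LibraryLocatorV2 such as "lib:MITx:physics-problems"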
def get_library_team(library_key):
"""
Get the list of users/groups granted permission to use this library.
"""
ref = ContentLibrary.objects.get_by_key(library_key)
return [
ContentLibraryPermissionEntry(user=entry.user, group=entry.group, access_level=entry.access_level)
for entry in ref.permission_grants.all()
]
def set_library_user_permissions(library_key, user, access_level):
"""
Change the specified user's level of access to this library.
access_level should be one of the AccessLevel values defined above.
"""
ref = ContentLibrary.objects.get_by_key(library_key)
if access_level is None:
ref.permission_grants.filter(user=user).delete()
else:
ContentLibraryPermission.objects.update_or_create(
library=ref,
user=user,
defaults={"access_level": access_level},
)
def set_library_group_permissions(library_key, group, access_level):
"""
Change the specified group's level of access to this library.
access_level should be one of the AccessLevel values defined above.
"""
ref = ContentLibrary.objects.get_by_key(library_key)
if access_level is None:
ref.permission_grants.filter(group=group).delete()
else:
ContentLibraryPermission.objects.update_or_create(
library=ref,
group=group,
defaults={"access_level": access_level},
)
def update_library(
library_key,
title=None,
description=None,
allow_public_learning=None,
allow_public_read=None,
):
"""
Update a library's title or description.
(Slug cannot be changed as it would break IDs throughout the system.)
A value of None means "don't change".
"""
ref = ContentLibrary.objects.get_by_key(library_key)
# Update MySQL model:
changed = False
if allow_public_learning is not None:
ref.allow_public_learning = allow_public_learning
changed = True
if allow_public_read is not None:
ref.allow_public_read = allow_public_read
changed = True
if changed:
ref.save()
# Update Blockstore:
fields = {
# We don't ever read the "slug" value from the Blockstore bundle, but
# we might as well always do our best to keep it in sync with the "slug"
# value in the LMS that we do use.
"slug": ref.slug,
}
if title is not None:
assert isinstance(title, six.string_types)
fields["title"] = title
if description is not None:
assert isinstance(description, six.string_types)
fields["description"] = description
update_bundle(ref.bundle_uuid, **fields)
def delete_library(library_key):
"""
Delete a content library
"""
ref = ContentLibrary.objects.get_by_key(library_key)
bundle_uuid = ref.bundle_uuid
# We can't atomically delete the ref and bundle at the same time.
# Delete the ref first, then the bundle. An error may cause the bundle not
# to get deleted, but the library will still be effectively gone from the
# system, which is a better state than having a reference to a library with
# no backing blockstore bundle.
ref.delete()
try:
delete_bundle(bundle_uuid)
except:
log.exception("Failed to delete blockstore bundle %s when deleting library. Delete it manually.", bundle_uuid)
raise
def get_library_blocks(library_key):
"""
Get the list of top-level XBlocks in the specified library.
Returns a list of LibraryXBlockMetadata objects
"""
ref = ContentLibrary.objects.get_by_key(library_key)
lib_bundle = LibraryBundle(library_key, ref.bundle_uuid, draft_name=DRAFT_NAME)
usages = lib_bundle.get_top_level_usages()
blocks = []
for usage_key in usages:
# For top-level definitions, we can go from definition key to usage key using the following, but this would not
# work for non-top-level blocks as they may have multiple usages. Top level blocks are guaranteed to have only
# a single usage in the library, which is part of the definition of top level block.
def_key = lib_bundle.definition_for_usage(usage_key)
blocks.append(LibraryXBlockMetadata(
usage_key=usage_key,
def_key=def_key,
display_name=get_block_display_name(def_key),
has_unpublished_changes=lib_bundle.does_definition_have_unpublished_changes(def_key),
))
return blocks
def _lookup_usage_key(usage_key):
"""
Given a LibraryUsageLocatorV2 (usage key for an XBlock in a content library)
return the definition key and LibraryBundle
or raise ContentLibraryBlockNotFound
"""
assert isinstance(usage_key, LibraryUsageLocatorV2)
lib_context = get_learning_context_impl(usage_key)
def_key = lib_context.definition_for_usage(usage_key, force_draft=DRAFT_NAME)
if def_key is None:
raise ContentLibraryBlockNotFound(usage_key)
lib_bundle = LibraryBundle(usage_key.lib_key, def_key.bundle_uuid, draft_name=DRAFT_NAME)
return def_key, lib_bundle
def get_library_block(usage_key):
"""
Get metadata (LibraryXBlockMetadata) about one specific XBlock in a library
To load the actual XBlock instance, use
openedx.core.djangoapps.xblock.api.load_block()
instead.
"""
def_key, lib_bundle = _lookup_usage_key(usage_key)
return LibraryXBlockMetadata(
usage_key=usage_key,
def_key=def_key,
display_name=get_block_display_name(def_key),
has_unpublished_changes=lib_bundle.does_definition_have_unpublished_changes(def_key),
)
def get_library_block_olx(usage_key):
"""
Get the OLX source of the given XBlock.
"""
assert isinstance(usage_key, LibraryUsageLocatorV2)
definition_key = get_library_block(usage_key).def_key
xml_str = get_bundle_file_data(
bundle_uuid=definition_key.bundle_uuid, # pylint: disable=no-member
path=definition_key.olx_path, # pylint: disable=no-member
use_draft=DRAFT_NAME,
).decode('utf-8')
return xml_str
def set_library_block_olx(usage_key, new_olx_str):
"""
Replace the OLX source of the given XBlock.
This is only meant for use by developers or API client applications, as
very little validation is done and this can easily result in a broken XBlock
that won't load.
"""
# because this old pylint can't understand attr.ib() objects, pylint: disable=no-member
assert isinstance(usage_key, LibraryUsageLocatorV2)
# Make sure the block exists:
metadata = get_library_block(usage_key)
block_type = usage_key.block_type
# Verify that the OLX parses, at least as generic XML:
node = etree.fromstring(new_olx_str)
if node.tag != block_type:
raise ValueError("Invalid root tag in OLX, expected {}".format(block_type))
# Write the new XML/OLX file into the library bundle's draft
draft = get_or_create_bundle_draft(metadata.def_key.bundle_uuid, DRAFT_NAME)
write_draft_file(draft.uuid, metadata.def_key.olx_path, new_olx_str.encode('utf-8'))
# Clear the bundle cache so everyone sees the new block immediately:
BundleCache(metadata.def_key.bundle_uuid, draft_name=DRAFT_NAME).clear()
def create_library_block(library_key, block_type, definition_id):
"""
Create a new XBlock in this library of the specified type (e.g. "html").
The 'definition_id' value (which should be a string like "problem1") will be
used as both the definition_id and the usage_id.
"""
assert isinstance(library_key, LibraryLocatorV2)
ref = ContentLibrary.objects.get_by_key(library_key)
# Make sure the proposed ID will be valid:
validate_unicode_slug(definition_id)
# Ensure the XBlock type is valid and installed:
XBlock.load_class(block_type) # Will raise an exception if invalid
# Make sure the new ID is not taken already:
new_usage_id = definition_id # Since this is a top level XBlock, usage_id == definition_id
usage_key = LibraryUsageLocatorV2(
lib_key=library_key,
block_type=block_type,
usage_id=new_usage_id,
)
library_context = get_learning_context_impl(usage_key)
if library_context.definition_for_usage(usage_key) is not None:
raise LibraryBlockAlreadyExists("An XBlock with ID '{}' already exists".format(new_usage_id))
new_definition_xml = '<{}/>'.format(block_type) # xss-lint: disable=python-wrap-html
path = "{}/{}/definition.xml".format(block_type, definition_id)
# Write the new XML/OLX file into the library bundle's draft
draft = get_or_create_bundle_draft(ref.bundle_uuid, DRAFT_NAME)
write_draft_file(draft.uuid, path, new_definition_xml.encode('utf-8'))
# Clear the bundle cache so everyone sees the new block immediately:
BundleCache(ref.bundle_uuid, draft_name=DRAFT_NAME).clear()
# Now return the metadata about the new block:
return get_library_block(usage_key)
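# Hedged usage sketch (the library key and IDs are hypothetical):
#   library_key = LibraryLocatorV2.from_string("lib:MITx:reading-list")
#   block = create_library_block(library_key, "html", "intro_text")
#   block.usage_key  # -> "lb:MITx:reading-list:html:intro_text"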
def delete_library_block(usage_key, remove_from_parent=True):
"""
Delete the specified block from this library (and any children it has).
If the block's definition (OLX file) is within this same library as the
usage key, both the definition and the usage will be deleted.
If the usage points to a definition in a linked bundle, the usage will be
deleted but the link and the linked bundle will be unaffected.
If the block is in use by some other bundle that links to this one, that
will not prevent deletion of the definition.
remove_from_parent: modify the parent to remove the reference to this
delete block. This should always be true except when this function
calls itself recursively.
"""
def_key, lib_bundle = _lookup_usage_key(usage_key)
# Create a draft:
draft_uuid = get_or_create_bundle_draft(def_key.bundle_uuid, DRAFT_NAME).uuid
# Does this block have a parent?
if usage_key not in lib_bundle.get_top_level_usages() and remove_from_parent:
# Yes: this is not a top-level block.
# First need to modify the parent to remove this block as a child.
raise NotImplementedError
# Does this block have children?
block = load_block(usage_key, user=None)
if block.has_children:
# Next, recursively call delete_library_block(...) on each child usage
for child_usage in block.children:
# Specify remove_from_parent=False to avoid unnecessary work to
# modify this block's children list when deleting each child, since
# we're going to delete this block anyways.
delete_library_block(child_usage, remove_from_parent=False)
# Delete the definition:
if def_key.bundle_uuid == lib_bundle.bundle_uuid:
# This definition is in the library, so delete it:
path_prefix = lib_bundle.olx_prefix(def_key)
for bundle_file in get_bundle_files(def_key.bundle_uuid, use_draft=DRAFT_NAME):
if bundle_file.path.startswith(path_prefix):
# Delete this file, within this definition's "folder"
write_draft_file(draft_uuid, bundle_file.path, contents=None)
else:
# The definition must be in a linked bundle, so we don't want to delete
# it; just the <xblock-include /> in the parent, which was already
# deleted above.
pass
# Clear the bundle cache so everyone sees the deleted block immediately:
lib_bundle.cache.clear()
def create_library_block_child(parent_usage_key, block_type, definition_id):
"""
Create a new XBlock definition in this library of the specified type (e.g.
"html"), and add it as a child of the specified existing block.
The 'definition_id' value (which should be a string like "problem1") will be
used as both the definition_id and the usage_id of the child.
"""
assert isinstance(parent_usage_key, LibraryUsageLocatorV2)
# Load the parent block to make sure it exists and so we can modify its 'children' field:
parent_block = load_block(parent_usage_key, user=None)
if not parent_block.has_children:
raise ValueError("The specified parent XBlock does not allow child XBlocks.")
# Create the new block in the library:
metadata = create_library_block(parent_usage_key.context_key, block_type, definition_id)
# Set the block as a child.
# This will effectively "move" the newly created block from being a top-level block in the library to a child.
include_data = XBlockInclude(link_id=None, block_type=block_type, definition_id=definition_id, usage_hint=None)
parent_block.runtime.add_child_include(parent_block, include_data)
parent_block.save()
return metadata
def get_library_block_static_asset_files(usage_key):
"""
Given an XBlock in a content library, list all the static asset files
associated with that XBlock.
Returns a list of LibraryXBlockStaticFile objects.
"""
def_key, lib_bundle = _lookup_usage_key(usage_key)
result = [
LibraryXBlockStaticFile(path=f.path, url=f.url, size=f.size)
for f in lib_bundle.get_static_files_for_definition(def_key)
]
result.sort(key=lambda f: f.path)
return result
def add_library_block_static_asset_file(usage_key, file_name, file_content):
"""
Upload a static asset file into the library, to be associated with the
specified XBlock. Will silently overwrite an existing file of the same name.
file_name should be a name like "doc.pdf". It may optionally contain slashes
like 'en/doc.pdf'
file_content should be a binary string.
Returns a LibraryXBlockStaticFile object.
Example:
video_block = UsageKey.from_string("lb:VideoTeam:python-intro:video:1")
add_library_block_static_asset_file(video_block, "subtitles-en.srt", subtitles.encode('utf-8'))
"""
assert isinstance(file_content, six.binary_type)
def_key, lib_bundle = _lookup_usage_key(usage_key)
if file_name != file_name.strip().strip('/'):
raise InvalidNameError("file name cannot start/end with / or whitespace.")
if '//' in file_name or '..' in file_name:
raise InvalidNameError("Invalid sequence (// or ..) in filename.")
file_path = lib_bundle.get_static_prefix_for_definition(def_key) + file_name
# Write the new static file into the library bundle's draft
draft = get_or_create_bundle_draft(def_key.bundle_uuid, DRAFT_NAME)
write_draft_file(draft.uuid, file_path, file_content)
# Clear the bundle cache so everyone sees the new file immediately:
lib_bundle.cache.clear()
file_metadata = blockstore_cache.get_bundle_file_metadata_with_cache(
bundle_uuid=def_key.bundle_uuid, path=file_path, draft_name=DRAFT_NAME,
)
return LibraryXBlockStaticFile(path=file_metadata.path, url=file_metadata.url, size=file_metadata.size)
def delete_library_block_static_asset_file(usage_key, file_name):
"""
Delete a static asset file from the library.
Example:
video_block = UsageKey.from_string("lb:VideoTeam:python-intro:video:1")
delete_library_block_static_asset_file(video_block, "subtitles-en.srt")
"""
def_key, lib_bundle = _lookup_usage_key(usage_key)
if '..' in file_name:
raise InvalidNameError("Invalid .. in file name.")
file_path = lib_bundle.get_static_prefix_for_definition(def_key) + file_name
# Delete the file from the library bundle's draft
draft = get_or_create_bundle_draft(def_key.bundle_uuid, DRAFT_NAME)
write_draft_file(draft.uuid, file_path, contents=None)
# Clear the bundle cache so everyone sees the new file immediately:
lib_bundle.cache.clear()
def get_allowed_block_types(library_key): # pylint: disable=unused-argument
"""
Get a list of XBlock types that can be added to the specified content
library. For now, the result is the same regardless of which library is
specified, but that may change in the future.
"""
# This import breaks in the LMS so keep it here. The LMS doesn't generally
# use content libraries APIs directly but some tests may want to use them to
# create libraries and then test library learning or course-library integration.
from cms.djangoapps.contentstore.views.helpers import xblock_type_display_name
# TODO: return support status and template options
# See cms/djangoapps/contentstore/views/component.py
block_types = sorted(name for name, class_ in XBlock.load_classes())
info = []
for block_type in block_types:
display_name = xblock_type_display_name(block_type, None)
# For now as a crude heuristic, we exclude blocks that don't have a display_name
if display_name:
info.append(LibraryXBlockType(block_type=block_type, display_name=display_name))
return info
def get_bundle_links(library_key):
"""
Get the list of bundles/libraries linked to this content library.
Returns LibraryBundleLink objects (defined above).
Because every content library is a blockstore bundle, it can have "links" to
other bundles, which may or may not be content libraries. This allows using
XBlocks (or perhaps even static assets etc.) from another bundle without
needing to duplicate/copy the data.
Links always point to a specific published version of the target bundle.
Links are identified by a slug-like ID, e.g. "link1"
"""
ref = ContentLibrary.objects.get_by_key(library_key)
links = blockstore_cache.get_bundle_draft_direct_links_cached(ref.bundle_uuid, DRAFT_NAME)
results = []
# To be able to quickly get the library ID from the bundle ID for links which point to other libraries, build a map:
bundle_uuids = set(link_data.bundle_uuid for link_data in links.values())
libraries_linked = {
lib.bundle_uuid: lib
for lib in ContentLibrary.objects.select_related('org').filter(bundle_uuid__in=bundle_uuids)
}
for link_name, link_data in links.items():
# Is this linked bundle a content library?
try:
opaque_key = libraries_linked[link_data.bundle_uuid].library_key
except KeyError:
opaque_key = None
# Append the link information:
results.append(LibraryBundleLink(
id=link_name,
bundle_uuid=link_data.bundle_uuid,
version=link_data.version,
latest_version=blockstore_cache.get_bundle_version_number(link_data.bundle_uuid),
opaque_key=opaque_key,
))
return results
def create_bundle_link(library_key, link_id, target_opaque_key, version=None):
"""
Create a new link to the resource with the specified opaque key.
For now, only LibraryLocatorV2 opaque keys are supported.
"""
ref = ContentLibrary.objects.get_by_key(library_key)
# Make sure this link ID/name is not already in use:
links = blockstore_cache.get_bundle_draft_direct_links_cached(ref.bundle_uuid, DRAFT_NAME)
if link_id in links:
raise InvalidNameError("That link ID is already in use.")
# Determine the target:
if not isinstance(target_opaque_key, LibraryLocatorV2):
raise TypeError("For now, only LibraryLocatorV2 opaque keys are supported by create_bundle_link")
target_bundle_uuid = ContentLibrary.objects.get_by_key(target_opaque_key).bundle_uuid
if version is None:
version = get_bundle(target_bundle_uuid).latest_version
# Create the new link:
draft = get_or_create_bundle_draft(ref.bundle_uuid, DRAFT_NAME)
set_draft_link(draft.uuid, link_id, target_bundle_uuid, version)
# Clear the cache:
LibraryBundle(library_key, ref.bundle_uuid, draft_name=DRAFT_NAME).cache.clear()
def update_bundle_link(library_key, link_id, version=None, delete=False):
"""
Update a bundle's link to point to the specified version of its target
bundle. Use version=None to automatically point to the latest version.
Use delete=True to delete the link.
"""
ref = ContentLibrary.objects.get_by_key(library_key)
draft = get_or_create_bundle_draft(ref.bundle_uuid, DRAFT_NAME)
if delete:
set_draft_link(draft.uuid, link_id, None, None)
else:
links = blockstore_cache.get_bundle_draft_direct_links_cached(ref.bundle_uuid, DRAFT_NAME)
try:
link = links[link_id]
except KeyError:
raise InvalidNameError("That link does not exist.")
if version is None:
version = get_bundle(link.bundle_uuid).latest_version
set_draft_link(draft.uuid, link_id, link.bundle_uuid, version)
# Clear the cache:
LibraryBundle(library_key, ref.bundle_uuid, draft_name=DRAFT_NAME).cache.clear()
def publish_changes(library_key):
"""
Publish all pending changes to the specified library.
"""
ref = ContentLibrary.objects.get_by_key(library_key)
bundle = get_bundle(ref.bundle_uuid)
if DRAFT_NAME in bundle.drafts: # pylint: disable=unsupported-membership-test
draft_uuid = bundle.drafts[DRAFT_NAME] # pylint: disable=unsubscriptable-object
commit_draft(draft_uuid)
else:
return # If there is no draft, no action is needed.
LibraryBundle(library_key, ref.bundle_uuid).cache.clear()
LibraryBundle(library_key, ref.bundle_uuid, draft_name=DRAFT_NAME).cache.clear()
def revert_changes(library_key):
"""
Revert all pending changes to the specified library, restoring it to the
last published version.
"""
ref = ContentLibrary.objects.get_by_key(library_key)
bundle = get_bundle(ref.bundle_uuid)
if DRAFT_NAME in bundle.drafts: # pylint: disable=unsupported-membership-test
draft_uuid = bundle.drafts[DRAFT_NAME] # pylint: disable=unsubscriptable-object
delete_draft(draft_uuid)
else:
return # If there is no draft, no action is needed.
LibraryBundle(library_key, ref.bundle_uuid, draft_name=DRAFT_NAME).cache.clear()
| agpl-3.0 |
foospidy/DbDat | plugins/mssql/check_configuration_server_audit_specifications.py | 1 | 1746 | class check_configuration_server_audit_specifications():
"""
check_configuration_server_audit_specifications:
Auditing of defined server level events. Review to ensure proper auditing
is enabled.
"""
# References:
# https://benchmarks.cisecurity.org/downloads/show-single/index.cfm?file=sql2012DB.120
TITLE = 'Server Audit Specifications'
CATEGORY = 'Configuration'
TYPE = 'sql'
SQL = """
SELECT audit_id,
a.name as audit_name,
s.name as server_specification_name,
d.audit_action_name,
s.is_state_enabled,
d.is_group,
d.audit_action_id,
s.create_date,
s.modify_date
FROM sys.server_audits AS a
JOIN sys.server_audit_specifications AS s
ON a.audit_guid = s.audit_guid
JOIN sys.server_audit_specification_details AS d
ON s.server_specification_id = d.server_specification_id
WHERE s.is_state_enabled = 1
"""
verbose = False
skip = False
result = {}
def do_check(self, *results):
for rows in results:
if 0 == len(rows):
self.result['level'] = 'RED'
self.result['output'] = 'There are no server audit specifications enabled.'
else:
                self.result['level'] = 'YELLOW'
                self.result['output'] = ''
                for row in rows:
                    self.result['output'] += '%s %s %s %s \n' % (row[0], row[1], row[2], row[3])
return self.result
def __init__(self, parent):
print('Performing check: ' + self.TITLE)
| gpl-2.0 |