repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes)
---|---|---|---|---|---
zhengyongbo/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/python/setup.py | 226 | 5923 | #! /usr/bin/python
#
# See README for usage instructions.
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "google" package.
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension
from distutils.spawn import find_executable
import sys
import os
import subprocess
maintainer_email = "[email protected]"
# Find the Protocol Compiler.
if os.path.exists("../src/protoc"):
protoc = "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
protoc = "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
protoc = "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
protoc = "../vsprojects/Release/protoc.exe"
else:
protoc = find_executable("protoc")
def generate_proto(source):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if not os.path.exists(source):
print "Can't find required file: " + source
sys.exit(-1)
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print "Generating %s..." % output
if protoc == None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
def MakeTestSuite():
# This is apparently needed on some systems to make sure that the tests
# work even if a previous version is already installed.
if 'google' in sys.modules:
del sys.modules['google']
generate_proto("../src/google/protobuf/unittest.proto")
generate_proto("../src/google/protobuf/unittest_custom_options.proto")
generate_proto("../src/google/protobuf/unittest_import.proto")
generate_proto("../src/google/protobuf/unittest_mset.proto")
generate_proto("../src/google/protobuf/unittest_no_generic_services.proto")
generate_proto("google/protobuf/internal/more_extensions.proto")
generate_proto("google/protobuf/internal/more_messages.proto")
import unittest
import google.protobuf.internal.generator_test as generator_test
import google.protobuf.internal.descriptor_test as descriptor_test
import google.protobuf.internal.reflection_test as reflection_test
import google.protobuf.internal.service_reflection_test \
as service_reflection_test
import google.protobuf.internal.text_format_test as text_format_test
import google.protobuf.internal.wire_format_test as wire_format_test
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
for test in [ generator_test,
descriptor_test,
reflection_test,
service_reflection_test,
text_format_test,
wire_format_test ]:
suite.addTest(loader.loadTestsFromModule(test))
return suite
if __name__ == '__main__':
# TODO(kenton): Integrate this into setuptools somehow?
if len(sys.argv) >= 2 and sys.argv[1] == "clean":
# Delete generated _pb2.py files and .pyc files in the code tree.
for (dirpath, dirnames, filenames) in os.walk("."):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if filepath.endswith("_pb2.py") or filepath.endswith(".pyc") or \
filepath.endswith(".so") or filepath.endswith(".o"):
os.remove(filepath)
else:
# Generate necessary .proto file if it doesn't exist.
# TODO(kenton): Maybe we should hook this into a distutils command?
generate_proto("../src/google/protobuf/descriptor.proto")
generate_proto("../src/google/protobuf/compiler/plugin.proto")
ext_module_list = []
# C++ implementation extension
if os.getenv("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION", "python") == "cpp":
print "Using EXPERIMENTAL C++ Implmenetation."
ext_module_list.append(Extension(
"google.protobuf.internal._net_proto2___python",
[ "google/protobuf/pyext/python_descriptor.cc",
"google/protobuf/pyext/python_protobuf.cc",
"google/protobuf/pyext/python-proto2.cc" ],
include_dirs = [ "." ],
libraries = [ "protobuf" ]))
setup(name = 'protobuf',
version = '2.4.2-pre',
packages = [ 'google' ],
namespace_packages = [ 'google' ],
test_suite = 'setup.MakeTestSuite',
# Must list modules explicitly so that we don't install tests.
py_modules = [
'google.protobuf.internal.api_implementation',
'google.protobuf.internal.containers',
'google.protobuf.internal.cpp_message',
'google.protobuf.internal.decoder',
'google.protobuf.internal.encoder',
'google.protobuf.internal.message_listener',
'google.protobuf.internal.python_message',
'google.protobuf.internal.type_checkers',
'google.protobuf.internal.wire_format',
'google.protobuf.descriptor',
'google.protobuf.descriptor_pb2',
'google.protobuf.compiler.plugin_pb2',
'google.protobuf.message',
'google.protobuf.reflection',
'google.protobuf.service',
'google.protobuf.service_reflection',
'google.protobuf.text_format' ],
ext_modules = ext_module_list,
url = 'http://code.google.com/p/protobuf/',
maintainer = maintainer_email,
maintainer_email = '[email protected]',
license = 'New BSD License',
description = 'Protocol Buffers',
long_description =
"Protocol Buffers are Google's data interchange format.",
)
| bsd-3-clause |
simsong/grr-insider | lib/parsers.py | 1 | 5759 | #!/usr/bin/env python
"""Registry for parsers and abstract classes for basic parser functionality."""
from grr.lib import artifact_lib
from grr.lib import rdfvalue
from grr.lib import registry
class Error(Exception):
"""Base error class."""
class ParserDefinitionError(Exception):
"""A parser was defined badly."""
class CommandFailedError(Error):
"""An error that gets raised due to the command failing."""
class ParseError(Error):
"""An error that gets raised due to the parsing of the output failing."""
class Parser(object):
"""A class for looking up parsers.
Parsers may be in other libraries or third party code; this class keeps
references to each of them so they can be called by name by the artifacts.
"""
__metaclass__ = registry.MetaclassRegistry
# A list of string identifiers for artifacts that this parser can process.
supported_artifacts = []
# The semantic types that can be produced by this parser.
output_types = []
# If set to true, results for this parser must be collected and processed in one
# go. This allows parsers to combine the results of multiple files/registry
# keys. It is disabled by default as it is more efficient to stream and parse
# results one at a time when this is not necessary.
process_together = False
@classmethod
def GetClassesByArtifact(cls, artifact_name):
"""Get the classes that support parsing a given artifact."""
return [cls.classes[c] for c in cls.classes if artifact_name in
cls.classes[c].supported_artifacts]
@classmethod
def GetDescription(cls):
if cls.__doc__:
return cls.__doc__.split("\n")[0]
else:
return ""
@classmethod
def Validate(cls):
"""Validate a parser is well defined."""
for artifact_to_parse in cls.supported_artifacts:
if artifact_to_parse not in artifact_lib.ArtifactRegistry.artifacts:
raise ParserDefinitionError("Artifact parser %s has an invalid artifact"
" %s. Artifact is undefined" %
(cls.__name__, artifact_to_parse))
for out_type in cls.output_types:
if out_type not in rdfvalue.RDFValue.classes:
raise ParserDefinitionError("Artifact parser %s has an invalid output "
"type %s." % (cls.__name__, out_type))
if cls.process_together:
if not hasattr(cls, "ParseMultiple"):
raise ParserDefinitionError("Parser %s has set process_together, but "
"has not defined a ParseMultiple method." %
cls.__name__)
class CommandParser(Parser):
"""Abstract parser for processing command output.
Must implement the Parse function.
"""
# Prevents this from automatically registering.
__abstract = True # pylint: disable=g-bad-name
def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
knowledge_base):
"""Take the output of the command run, and yield RDFValues."""
def CheckReturn(self, cmd, return_val):
"""Raise if return value is bad."""
if return_val != 0:
raise CommandFailedError("Parsing output of Command %s failed, as "
"command had %s return code" % (cmd, return_val))
class FileParser(Parser):
"""Abstract parser for processing files output.
Must implement the Parse function.
"""
# Prevents this from automatically registering.
__abstract = True # pylint: disable=g-bad-name
def Parse(self, stat, file_object, knowledge_base):
"""Take the file data, and yield RDFValues."""
def ParseMultiple(self, stats, file_objects, knowledge_base):
"""Take the file data, and yield RDFValues."""
class WMIQueryParser(Parser):
"""Abstract parser for processing WMI query output."""
def Parse(self, query, result_dict, knowledge_base):
"""Take the output of the query, and yield RDFValues."""
class RegistryValueParser(Parser):
"""Abstract parser for processing Registry values."""
def Parse(self, stat, knowledge_base):
"""Take the stat, and yield RDFValues."""
class RegistryParser(Parser):
"""Abstract parser for processing Registry values."""
def ParseMultiple(self, stats, knowledge_base):
"""Parse multiple results in a single call."""
def Parse(self, stat, knowledge_base):
"""Take the stat, and yield RDFValues."""
class GenericResponseParser(Parser):
"""Abstract response parser."""
def Parse(self, response, knowledge_base):
"""Parse the response object."""
class GrepParser(Parser):
"""Parser for the results of grep artifacts."""
def Parse(self, response, knowledge_base):
"""Parse the FileFinderResult.matches."""
class ArtifactFilesParser(Parser):
"""Abstract parser for processing artifact files."""
def Parse(self, persistence, knowledge_base, download_pathtype):
"""Parse artifact files."""
class VolatilityPluginParser(Parser):
"""Abstract parser for processing Volatility results."""
def ParseMultiple(self, results, knowledge_base):
"""Parse multiple results in a single call."""
def Parse(self, results, knowledge_base):
"""Take the stat, and yield RDFValues."""
def CheckError(self, result):
if result.error:
raise ParseError("Volatility returned an error for plugin %s. Error: %s"
% (result.plugin, result.error))
def IterateSections(self, result, plugin=None):
"""Iterate through all sections if a plugin matches."""
self.CheckError(result)
if plugin and result.plugin == plugin:
for section in result.sections:
headers = [h.name for h in section.table.headers]
for row in section.table.rows:
yield dict(zip(headers, row.values))
| apache-2.0 |
feigames/Odoo | addons/pos_discount/discount.py | 315 | 1468 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class pos_config(osv.osv):
_inherit = 'pos.config'
_columns = {
'discount_pc': fields.float('Discount Percentage', help='The discount percentage'),
'discount_product_id': fields.many2one('product.product','Discount Product', help='The product used to model the discount'),
}
_defaults = {
'discount_pc': 10,
}
| agpl-3.0 |
igemsoftware/SYSU-Software2013 | project/Python27/Lib/ctypes/test/test_unaligned_structures.py | 282 | 1215 | import sys, unittest
from ctypes import *
structures = []
byteswapped_structures = []
if sys.byteorder == "little":
SwappedStructure = BigEndianStructure
else:
SwappedStructure = LittleEndianStructure
for typ in [c_short, c_int, c_long, c_longlong,
c_float, c_double,
c_ushort, c_uint, c_ulong, c_ulonglong]:
class X(Structure):
_pack_ = 1
_fields_ = [("pad", c_byte),
("value", typ)]
class Y(SwappedStructure):
_pack_ = 1
_fields_ = [("pad", c_byte),
("value", typ)]
structures.append(X)
byteswapped_structures.append(Y)
class TestStructures(unittest.TestCase):
def test_native(self):
for typ in structures:
## print typ.value
self.assertEqual(typ.value.offset, 1)
o = typ()
o.value = 4
self.assertEqual(o.value, 4)
def test_swapped(self):
for typ in byteswapped_structures:
## print >> sys.stderr, typ.value
self.assertEqual(typ.value.offset, 1)
o = typ()
o.value = 4
self.assertEqual(o.value, 4)
if __name__ == '__main__':
unittest.main()
| mit |
jtg-gg/blink | Source/build/scripts/make_internal_runtime_flags.py | 42 | 2736 | #!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os.path
import sys
import in_generator
import make_runtime_features
import name_utilities
import template_expander
# We want exactly the same parsing as RuntimeFeatureWriter
# but generate different files.
class InternalRuntimeFlagsWriter(make_runtime_features.RuntimeFeatureWriter):
class_name = 'InternalRuntimeFlags'
filters = {
'enable_conditional': name_utilities.enable_conditional_if_endif,
}
def __init__(self, in_file_path):
super(InternalRuntimeFlagsWriter, self).__init__(in_file_path)
self._outputs = {(self.class_name + '.idl'): self.generate_idl,
(self.class_name + '.h'): self.generate_header,
}
@template_expander.use_jinja(class_name + '.idl.tmpl')
def generate_idl(self):
return {
'features': self._features,
}
@template_expander.use_jinja(class_name + '.h.tmpl', filters=filters)
def generate_header(self):
return {
'features': self._features,
'feature_sets': self._feature_sets(),
}
if __name__ == '__main__':
in_generator.Maker(InternalRuntimeFlagsWriter).main(sys.argv)
| bsd-3-clause |
kwanghui/patents | test/test_sqlite_merge.py | 6 | 3869 | #!/usr/bin/env python
import unittest
import os
import sqlite3
import sys
sys.path.append( '../lib/' )
import SQLite
class TestSQLite(unittest.TestCase):
def removeFile(self, fname):
#delete a fname if it exists
try:
os.remove(fname)
except OSError:
pass
def createFile(self, file, type=None, data="1,2,3"):
#create a file db, csv
if file.split(".")[-1] == "db" or type == "db":
connection = sqlite3.connect(file)
cursor = connection.cursor()
cursor.executescript("""
CREATE TABLE test (a, B, cursor);
CREATE TABLE main (d, E, f);
INSERT INTO test VALUES ({data});
INSERT INTO main VALUES ({data});
CREATE INDEX idx ON test (a);
CREATE INDEX idy ON test (a, b);
""".format(data=data)) #"""
connection.commit()
cursor.close()
connection = sqlite3.connect(file)
elif file.split(".")[-1] == "csv" or type == "csv":
os.system("echo '{data}' >> {file}".\
format(data=data, file=file))
def setUp(self):
self.removeFile("test.db")
self.removeFile("test.csv")
self.removeFile("test2.db")
self.removeFile("test2.csv")
# create a really basic dataset
self.createFile(file="test.db")
self.s = SQLite.SQLite(db="test.db", tbl="test")
self.createFile("test2.db")
s = SQLite.SQLite("test2.db", tbl="test")
self.s.attach(s)
def tearDown(self):
self.s.close()
self.removeFile("test.db")
self.removeFile("test.csv")
self.removeFile("test2.db")
self.removeFile("test2.csv")
self.removeFile("errlog")
def test_indexes(self):
self.assertIn('idx', self.s.indexes())
self.assertTrue(self.s.indexes(lookup="idx"))
self.assertFalse(self.s.indexes(lookup="xdi"))
self.assertEquals([0,0], self.s.indexes(seq="xdi"))
self.assertEquals([1,1], self.s.indexes(seq="idx"))
self.s.c.executescript("""
CREATE INDEX idx1 ON test (b);
CREATE INDEX idx2 ON test (cursor);
CREATE INDEX idx5x3 ON test (a);
CREATE INDEX idx10x ON test (a);
""")
self.assertEquals([1,3], self.s.indexes(seq="idx"))
def test__baseIndex(self):
self.assertItemsEqual(['test (a)', 'test (a,b)'],
self.s._baseIndex(db="db"))
self.assertEqual('test (a)',
self.s._baseIndex(idx="idx"))
self.assertEqual('foo (bar,foo)',
self.s._baseIndex(idx="create index x on foo (foo, bar)"))
self.assertEqual('unique foo (foo)',
self.s._baseIndex(idx="create unique index x on foo (foo)"))
def test_index(self):
self.s.index([['a','cursor']])
self.assertIn('test (a,cursor)', self.s._baseIndex())
self.s.index('a', unique=True)
self.assertIn('test (a)', self.s._baseIndex())
self.assertFalse(self.s.index(['a','cursor']))
self.s.index('f', tbl="main")
self.assertIn('main (f)', self.s._baseIndex())
self.assertFalse(self.s.index('a', tbl="main"))
#self.s.index(['e', 'f'], combo=True, tbl="main")
#self.assertIn('main (e)', self.s._baseIndex(tbl="main"))
#self.assertIn('main (e,f)', self.s._baseIndex(tbl="main"))
self.s.index([['a','cursor']], db="db")
self.assertIn('test (a,cursor)', self.s._baseIndex(db="db"))
# def test_merge(self):
# s = SQLite.SQLite()
# s.merge(key=[['AsgNum', 'pdpass']], on=[['assigneeAsc', 'assignee']],
# keyType=['INTEGER'], tableFrom='main', db='db')
# assert(1 == 1)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
arbrandes/edx-platform | openedx/core/djangoapps/dark_lang/views.py | 4 | 4169 | """
Views file for the Darklang Django App
"""
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation import ugettext as _
from web_fragments.fragment import Fragment
from openedx.core.djangoapps.dark_lang import DARK_LANGUAGE_KEY
from openedx.core.djangoapps.dark_lang.models import DarkLangConfig
from openedx.core.djangoapps.plugin_api.views import EdxFragmentView
from openedx.core.djangoapps.user_api.preferences.api import delete_user_preference, set_user_preference
from openedx.core.djangoapps.util.user_messages import PageLevelMessages
LANGUAGE_INPUT_FIELD = 'preview_language'
class PreviewLanguageFragmentView(EdxFragmentView):
"""
View used when a user is attempting to change the preview language using Darklang.
Expected Behavior:
GET - returns a form for setting/resetting the user's dark language
POST - updates or clears the setting to the given dark language
"""
def render_to_fragment(self, request, course_id=None, **kwargs): # lint-amnesty, pylint: disable=arguments-differ, unused-argument
"""
Renders the language preview view as a fragment.
"""
html = render_to_string('dark_lang/preview-language-fragment.html', {})
return Fragment(html)
def create_base_standalone_context(self, request, fragment, **kwargs):
"""
Creates the base context for rendering a fragment as a standalone page.
"""
return {
'uses_bootstrap': True,
}
def standalone_page_title(self, request, fragment, **kwargs):
"""
Returns the page title for the standalone update page.
"""
return _('Preview Language Administration')
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
"""
Renders the fragment to control the preview language.
"""
if not self._user_can_preview_languages(request.user):
raise Http404
return super().get(request, *args, **kwargs)
@method_decorator(login_required)
def post(self, request, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
Accept requests to update the preview language.
"""
if not self._user_can_preview_languages(request.user):
raise Http404
action = request.POST.get('action', None)
if action == 'set_preview_language':
self._set_preview_language(request)
elif action == 'reset_preview_language':
self._clear_preview_language(request)
return redirect(request.path)
def _user_can_preview_languages(self, user):
"""
Returns true if the specified user can preview languages.
"""
if not DarkLangConfig.current().enabled:
return False
return user and not user.is_anonymous
def _set_preview_language(self, request):
"""
Sets the preview language for the current user.
"""
preview_language = request.POST.get(LANGUAGE_INPUT_FIELD, '')
if not preview_language.strip():
PageLevelMessages.register_error_message(request, _('Language not provided'))
return
set_user_preference(request.user, DARK_LANGUAGE_KEY, preview_language)
PageLevelMessages.register_success_message(
request,
_('Language set to {preview_language}').format(
preview_language=preview_language
)
)
def _clear_preview_language(self, request):
"""
Clears the preview language for the current user.
"""
delete_user_preference(request.user, DARK_LANGUAGE_KEY)
if LANGUAGE_SESSION_KEY in request.session:
del request.session[LANGUAGE_SESSION_KEY]
PageLevelMessages.register_success_message(
request,
_('Language reset to the default')
)
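# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how an authenticated client could exercise the POST handler above. The
# URL path is an assumption (the real route is declared in the app's urls.py);
# the form field name comes from LANGUAGE_INPUT_FIELD defined at the top of
# this file.
def _example_preview_language_requests(client):
    """Hypothetical helper: ``client`` is a logged-in ``django.test.Client``."""
    # Set the preview (dark) language to Esperanto.
    client.post('/update_lang/', {
        'action': 'set_preview_language',
        LANGUAGE_INPUT_FIELD: 'eo',
    })
    # Reset the preview language back to the platform default.
    client.post('/update_lang/', {'action': 'reset_preview_language'})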
| agpl-3.0 |
yglazko/socorro | socorro/unittest/external/postgresql/test_platforms.py | 11 | 2365 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from nose.plugins.attrib import attr
from nose.tools import eq_
from socorro.external.postgresql.platforms import Platforms
from .unittestbase import PostgreSQLTestCase
#==============================================================================
@attr(integration='postgres') # for nosetests
class IntegrationTestPlatforms(PostgreSQLTestCase):
"""Test socorro.external.postgresql.platforms.Platforms class. """
#--------------------------------------------------------------------------
def setUp(self):
"""Set up this test class by populating the os_names table with fake
data. """
super(IntegrationTestPlatforms, self).setUp()
cursor = self.connection.cursor()
# Insert data
cursor.execute("""
INSERT INTO os_names
(os_name, os_short_name)
VALUES
(
'Windows NT',
'win'
),
(
'Mac OS X',
'mac'
),
(
'Linux',
'lin'
);
""")
self.connection.commit()
#--------------------------------------------------------------------------
def tearDown(self):
"""Clean up the database, delete tables and functions. """
cursor = self.connection.cursor()
cursor.execute("""
TRUNCATE os_names CASCADE
""")
self.connection.commit()
super(IntegrationTestPlatforms, self).tearDown()
#--------------------------------------------------------------------------
def test_get(self):
platforms = Platforms(config=self.config)
res = platforms.get()
res_expected = {
"hits": [
{
"name": "Windows NT",
"code": "win"
},
{
"name": "Mac OS X",
"code": "mac"
},
{
"name": "Linux",
"code": "lin"
}
],
"total": 3
}
eq_(res, res_expected)
| mpl-2.0 |
apache/airflow | airflow/providers/apache/kylin/operators/kylin_cube.py | 2 | 7632 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from datetime import datetime
from typing import Optional
from kylinpy import kylinpy
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.kylin.hooks.kylin import KylinHook
class KylinCubeOperator(BaseOperator):
"""
This operator is used to submit requests for Kylin build/refresh/merge,
and can track job status, so users can more easily build Kylin jobs.
For more detailed information, see
`Apache Kylin <http://kylin.apache.org/>`_
:param kylin_conn_id: The connection id as configured in Airflow administration.
:type kylin_conn_id: str
:param project: kylin project name, this param will overwrite the project in kylin_conn_id:
:type project: str
:param cube: kylin cube name
:type cube: str
:param dsn: (dsn url of the kylin connection, which will overwrite kylin_conn_id.
for example: kylin://ADMIN:KYLIN@sandbox/learn_kylin?timeout=60&is_debug=1)
:type dsn: str
:param command: (kylin commands include 'build', 'merge', 'refresh', 'delete',
'build_streaming', 'merge_streaming', 'refresh_streaming', 'disable', 'enable',
'purge', 'clone', 'drop'.
build - use /kylin/api/cubes/{cubeName}/build rest api, and buildType is ‘BUILD’,
and you should give start_time and end_time
refresh - use build rest api, and buildType is ‘REFRESH’
merge - use build rest api, and buildType is ‘MERGE’
build_streaming - use /kylin/api/cubes/{cubeName}/build2 rest api, and buildType is ‘BUILD’
and you should give offset_start and offset_end
refresh_streaming - use build2 rest api, and buildType is ‘REFRESH’
merge_streaming - use build2 rest api, and buildType is ‘MERGE’
delete - delete segment, and you should give segment_name value
disable - disable cube
enable - enable cube
purge - purge cube
clone - clone cube,new cube name is {cube_name}_clone
drop - drop cube)
:type command: str
:param start_time: build segment start time
:type start_time: Optional[str]
:param end_time: build segment end time
:type end_time: Optional[str]
:param offset_start: streaming build segment start time
:type offset_start: Optional[str]
:param offset_end: streaming build segment end time
:type offset_end: Optional[str]
:param segment_name: segment name
:type segment_name: str
:param is_track_job: (whether to track job status. if the value is True, the job will be tracked until
its status is in ("FINISHED", "ERROR", "DISCARDED", "KILLED", "SUICIDAL",
"STOPPED") or timeout)
:type is_track_job: bool
:param interval: interval in seconds at which the job status is polled; default value is 60s
:type interval: int
:param timeout: timeout value; default value is 1 day (60 * 60 * 24 s)
:type timeout: int
:param eager_error_status: (job error statuses; if the job status is in this list, this task will fail.
default value is tuple(["ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"]))
:type eager_error_status: tuple
"""
template_fields = (
'project',
'cube',
'dsn',
'command',
'start_time',
'end_time',
'segment_name',
'offset_start',
'offset_end',
)
ui_color = '#E79C46'
build_command = {
'fullbuild',
'build',
'merge',
'refresh',
'build_streaming',
'merge_streaming',
'refresh_streaming',
}
jobs_end_status = {"FINISHED", "ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"}
def __init__(
self,
*,
kylin_conn_id: str = 'kylin_default',
project: Optional[str] = None,
cube: Optional[str] = None,
dsn: Optional[str] = None,
command: Optional[str] = None,
start_time: Optional[str] = None,
end_time: Optional[str] = None,
offset_start: Optional[str] = None,
offset_end: Optional[str] = None,
segment_name: Optional[str] = None,
is_track_job: bool = False,
interval: int = 60,
timeout: int = 60 * 60 * 24,
eager_error_status=("ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"),
**kwargs,
):
super().__init__(**kwargs)
self.kylin_conn_id = kylin_conn_id
self.project = project
self.cube = cube
self.dsn = dsn
self.command = command
self.start_time = start_time
self.end_time = end_time
self.segment_name = segment_name
self.offset_start = offset_start
self.offset_end = offset_end
self.is_track_job = is_track_job
self.interval = interval
self.timeout = timeout
self.eager_error_status = eager_error_status
self.jobs_error_status = [stat.upper() for stat in eager_error_status]
def execute(self, context):
_hook = KylinHook(kylin_conn_id=self.kylin_conn_id, project=self.project, dsn=self.dsn)
_support_invoke_command = kylinpy.CubeSource.support_invoke_command
if self.command.lower() not in _support_invoke_command:
raise AirflowException(
'Kylin:Command {} can not match kylin command list {}'.format(
self.command, _support_invoke_command
)
)
kylinpy_params = {
'start': datetime.fromtimestamp(int(self.start_time) / 1000) if self.start_time else None,
'end': datetime.fromtimestamp(int(self.end_time) / 1000) if self.end_time else None,
'name': self.segment_name,
'offset_start': int(self.offset_start) if self.offset_start else None,
'offset_end': int(self.offset_end) if self.offset_end else None,
}
rsp_data = _hook.cube_run(self.cube, self.command.lower(), **kylinpy_params)
if self.is_track_job and self.command.lower() in self.build_command:
started_at = time.monotonic()
job_id = rsp_data.get("uuid")
if job_id is None:
raise AirflowException("kylin job id is None")
self.log.info("kylin job id: %s", job_id)
job_status = None
while job_status not in self.jobs_end_status:
if time.monotonic() - started_at > self.timeout:
raise AirflowException(f'kylin job {job_id} timeout')
time.sleep(self.interval)
job_status = _hook.get_job_status(job_id)
self.log.info('Kylin job status is %s ', job_status)
if job_status in self.jobs_error_status:
raise AirflowException(f'Kylin job {job_id} status {job_status} is error ')
if self.do_xcom_push:
return rsp_data
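# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal example of wiring this operator into a DAG. The connection id,
# project and cube names are hypothetical; start_time/end_time are millisecond
# epoch strings, matching the datetime.fromtimestamp(int(...) / 1000) conversion
# in execute() above.
def _example_kylin_build_dag():
    """Hypothetical helper that assembles an example DAG; not used by the provider."""
    from airflow import DAG
    from airflow.utils.dates import days_ago
    with DAG(dag_id="example_kylin_build", start_date=days_ago(1),
             schedule_interval=None) as dag:
        KylinCubeOperator(
            task_id="kylin_build_segment",
            kylin_conn_id="kylin_default",
            project="learn_kylin",       # hypothetical project name
            cube="kylin_sales_cube",     # hypothetical cube name
            command="build",
            start_time="1325347200000",  # milliseconds since epoch (2012-01-01)
            end_time="1325433600000",    # milliseconds since epoch (2012-01-02)
            is_track_job=True,
        )
    return dag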
| apache-2.0 |
CoDEmanX/ArangoDB | 3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/command/bdist_wininst.py | 49 | 14929 | """distutils.command.bdist_wininst
Implements the Distutils 'bdist_wininst' command: create a windows installer
exe-program."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bdist_wininst.py 71422 2009-04-09 22:48:19Z tarek.ziade $"
import sys, os, string
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import create_tree, remove_tree
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_wininst (Command):
description = "create an executable installer for MS Windows"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('bitmap=', 'b',
"bitmap to use for the installer instead of python-powered logo"),
('title=', 't',
"title to display on the installer background instead of default"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
('user-access-control=', None,
"specify Vista's UAC handling - 'none'/default=no "
"handling, 'auto'=use UAC if target Python installed for "
"all users, 'force'=always use UAC"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.no_target_compile = 0
self.no_target_optimize = 0
self.target_version = None
self.dist_dir = None
self.bitmap = None
self.title = None
self.skip_build = 0
self.install_script = None
self.pre_install_script = None
self.user_access_control = None
# initialize_options()
def finalize_options (self):
if self.bdist_dir is None:
if self.skip_build and self.plat_name:
# If build is skipped and plat_name is overridden, bdist will
# not see the correct 'plat_name' - so set that up manually.
bdist = self.distribution.get_command_obj('bdist')
bdist.plat_name = self.plat_name
# next the command will be initialized using that name
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wininst')
if not self.target_version:
self.target_version = ""
if not self.skip_build and self.distribution.has_ext_modules():
short_version = get_python_version()
if self.target_version and self.target_version != short_version:
raise DistutilsOptionError, \
"target version can only be %s, or the '--skip_build'" \
" option must be specified" % (short_version,)
self.target_version = short_version
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
)
if self.install_script:
for script in self.distribution.scripts:
if self.install_script == os.path.basename(script):
break
else:
raise DistutilsOptionError, \
"install_script '%s' not found in scripts" % \
self.install_script
# finalize_options()
def run (self):
if (sys.platform != "win32" and
(self.distribution.has_ext_modules() or
self.distribution.has_c_libraries())):
raise DistutilsPlatformError \
("distribution contains extensions and/or C libraries; "
"must be compiled on a Windows 32 platform")
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
install.plat_name = self.plat_name
install_lib = self.reinitialize_command('install_lib')
# we do not want to include pyc or pyo files
install_lib.compile = 0
install_lib.optimize = 0
if self.distribution.has_ext_modules():
# If we are building an installer for a Python version other
# than the one we are currently running, then we need to ensure
# our build_lib reflects the other Python version rather than ours.
# Note that for target_version!=sys.version, we must have skipped the
# build step, so there is no issue with enforcing the build of this
# version.
target_version = self.target_version
if not target_version:
assert self.skip_build, "Should have already checked this"
target_version = sys.version[0:3]
plat_specifier = ".%s-%s" % (self.plat_name, target_version)
build = self.get_finalized_command('build')
build.build_lib = os.path.join(build.build_base,
'lib' + plat_specifier)
# Use a custom scheme for the zip-file, because we have to decide
# at installation time which scheme to use.
for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
value = string.upper(key)
if key == 'headers':
value = value + '/Include/$dist_name'
setattr(install,
'install_' + key,
value)
log.info("installing to %s", self.bdist_dir)
install.ensure_finalized()
# avoid warning of 'install_lib' about installing
# into a directory not in sys.path
sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
install.run()
del sys.path[0]
# And make an archive relative to the root of the
# pseudo-installation tree.
from tempfile import mktemp
archive_basename = mktemp()
fullname = self.distribution.get_fullname()
arcname = self.make_archive(archive_basename, "zip",
root_dir=self.bdist_dir)
# create an exe containing the zip-file
self.create_exe(arcname, fullname, self.bitmap)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_wininst', pyversion,
self.get_installer_filename(fullname)))
# remove the zip-file again
log.debug("removing temporary file '%s'", arcname)
os.remove(arcname)
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# run()
def get_inidata (self):
# Return data describing the installation.
lines = []
metadata = self.distribution.metadata
# Write the [metadata] section.
lines.append("[metadata]")
# 'info' will be displayed in the installer's dialog box,
# describing the items to be installed.
info = (metadata.long_description or '') + '\n'
# Escape newline characters
def escape(s):
return string.replace(s, "\n", "\\n")
for name in ["author", "author_email", "description", "maintainer",
"maintainer_email", "name", "url", "version"]:
data = getattr(metadata, name, "")
if data:
info = info + ("\n %s: %s" % \
(string.capitalize(name), escape(data)))
lines.append("%s=%s" % (name, escape(data)))
# The [setup] section contains entries controlling
# the installer runtime.
lines.append("\n[Setup]")
if self.install_script:
lines.append("install_script=%s" % self.install_script)
lines.append("info=%s" % escape(info))
lines.append("target_compile=%d" % (not self.no_target_compile))
lines.append("target_optimize=%d" % (not self.no_target_optimize))
if self.target_version:
lines.append("target_version=%s" % self.target_version)
if self.user_access_control:
lines.append("user_access_control=%s" % self.user_access_control)
title = self.title or self.distribution.get_fullname()
lines.append("title=%s" % escape(title))
import time
import distutils
build_info = "Built %s with distutils-%s" % \
(time.ctime(time.time()), distutils.__version__)
lines.append("build_info=%s" % build_info)
return string.join(lines, "\n")
# get_inidata()
def create_exe (self, arcname, fullname, bitmap=None):
import struct
self.mkpath(self.dist_dir)
cfgdata = self.get_inidata()
installer_name = self.get_installer_filename(fullname)
self.announce("creating %s" % installer_name)
if bitmap:
bitmapdata = open(bitmap, "rb").read()
bitmaplen = len(bitmapdata)
else:
bitmaplen = 0
file = open(installer_name, "wb")
file.write(self.get_exe_bytes())
if bitmap:
file.write(bitmapdata)
# Convert cfgdata from unicode to ascii, mbcs encoded
try:
unicode
except NameError:
pass
else:
if isinstance(cfgdata, unicode):
cfgdata = cfgdata.encode("mbcs")
# Append the pre-install script
cfgdata = cfgdata + "\0"
if self.pre_install_script:
script_data = open(self.pre_install_script, "r").read()
cfgdata = cfgdata + script_data + "\n\0"
else:
# empty pre-install script
cfgdata = cfgdata + "\0"
file.write(cfgdata)
# The 'magic number' 0x1234567B is used to make sure that the
# binary layout of 'cfgdata' is what the wininst.exe binary
# expects. If the layout changes, increment that number, make
# the corresponding changes to the wininst.exe sources, and
# recompile them.
header = struct.pack("<iii",
0x1234567B, # tag
len(cfgdata), # length
bitmaplen, # number of bytes in bitmap
)
file.write(header)
file.write(open(arcname, "rb").read())
# create_exe()
def get_installer_filename(self, fullname):
# Factored out to allow overriding in subclasses
if self.target_version:
# if we create an installer for a specific python version,
# it's better to include this in the name
installer_name = os.path.join(self.dist_dir,
"%s.%s-py%s.exe" %
(fullname, self.plat_name, self.target_version))
else:
installer_name = os.path.join(self.dist_dir,
"%s.%s.exe" % (fullname, self.plat_name))
return installer_name
# get_installer_filename()
def get_exe_bytes (self):
from distutils.msvccompiler import get_build_version
# If a target-version other than the current version has been
# specified, then using the MSVC version from *this* build is no good.
# Without actually finding and executing the target version and parsing
# its sys.version, we just hard-code our knowledge of old versions.
# NOTE: Possible alternative is to allow "--target-version" to
# specify a Python executable rather than a simple version string.
# We can then execute this program to obtain any info we need, such
# as the real sys.version string for the build.
cur_version = get_python_version()
if self.target_version and self.target_version != cur_version:
# If the target version is *later* than us, then we assume they
# use what we use
# string compares seem wrong, but are what sysconfig.py itself uses
if self.target_version > cur_version:
bv = get_build_version()
else:
if self.target_version < "2.4":
bv = 6.0
else:
bv = 7.1
else:
# for current version - use authoritative check.
bv = get_build_version()
# wininst-x.y.exe is in the same directory as this file
directory = os.path.dirname(__file__)
# we must use a wininst-x.y.exe built with the same C compiler
# used for python. XXX What about mingw, borland, and so on?
# if plat_name starts with "win" but is not "win32"
# we want to strip "win" and leave the rest (e.g. -amd64)
# for all other cases, we don't want any suffix
if self.plat_name != 'win32' and self.plat_name[:3] == 'win':
sfix = self.plat_name[3:]
else:
sfix = ''
filename = os.path.join(directory, "wininst-%.1f%s.exe" % (bv, sfix))
return open(filename, "rb").read()
# class bdist_wininst
| apache-2.0 |
Mr-lin930819/SimplOS | fill_screens.py | 17 | 2574 | #!/usr/bin/env python2.5
import cgi
import os
import shutil
import sys
import sqlite3
SCREENS = 5
COLUMNS = 4
ROWS = 4
CELL_SIZE = 110
DIR = "db_files"
AUTO_FILE = "launcher.db"
APPLICATION_COMPONENTS = [
"com.android.calculator2/com.android.calculator2.Calculator",
"com.android.providers.downloads.ui/com.android.providers.downloads.ui.DownloadList",
"com.android.settings/com.android.settings.Settings",
"com.android.mms/com.android.mms.ui.ConversationList",
"com.android.contacts/com.android.contacts.activities.PeopleActivity",
"com.android.dialer/com.android.dialer.DialtactsActivity"
]
def usage():
print "usage: fill_screens.py -- fills up the launcher db"
def make_dir():
shutil.rmtree(DIR, True)
os.makedirs(DIR)
def pull_file(fn):
print "pull_file: " + fn
rv = os.system("adb pull"
+ " /data/data/com.android.launcher/databases/launcher.db"
+ " " + fn);
if rv != 0:
print "adb pull failed"
sys.exit(1)
def push_file(fn):
print "push_file: " + fn
rv = os.system("adb push"
+ " " + fn
+ " /data/data/com.android.launcher/databases/launcher.db")
if rv != 0:
print "adb push failed"
sys.exit(1)
def process_file(fn):
print "process_file: " + fn
conn = sqlite3.connect(fn)
c = conn.cursor()
c.execute("DELETE FROM favorites")
intentFormat = "#Intent;action=android.intent.action.MAIN;category=android.intent.category.LAUNCHER;launchFlags=0x10200000;component=%s;end"
id = 0;
for s in range(SCREENS):
for x in range(ROWS):
for y in range(COLUMNS):
id += 1
insert = "INSERT into favorites (_id, title, intent, container, screen, cellX, cellY, spanX, spanY, itemType, appWidgetId, iconType) VALUES (%d, '%s', '%s', %d, %d, %d, %d, %d, %d, %d, %d, %d)"
insert = insert % (id, "title", "", -100, s, x, y, 1, 1, 2, -1, 0)
c.execute(insert)
folder_id = id
for z in range(15):
id += 1
intent = intentFormat % (APPLICATION_COMPONENTS[id % len(APPLICATION_COMPONENTS)])
insert = "INSERT into favorites (_id, title, intent, container, screen, cellX, cellY, spanX, spanY, itemType, appWidgetId, iconType) VALUES (%d, '%s', '%s', %d, %d, %d, %d, %d, %d, %d, %d, %d)"
insert = insert % (id, "title", intent, folder_id, 0, 0, 0, 1, 1, 0, -1, 0)
c.execute(insert)
conn.commit()
c.close()
def main(argv):
if len(argv) == 1:
make_dir()
pull_file(AUTO_FILE)
process_file(AUTO_FILE)
push_file(AUTO_FILE)
else:
usage()
if __name__=="__main__":
main(sys.argv)
| gpl-3.0 |
robertswiecki/oss-fuzz | infra/gcb/builds_status.py | 1 | 3990 | #!/usr/bin/env python2
import datetime
import os
import sys
import jinja2
import json
import tempfile
import dateutil.parser
from oauth2client.client import GoogleCredentials
import googleapiclient
from googleapiclient.discovery import build as gcb_build
from google.cloud import logging
from google.cloud import storage
from jinja2 import Environment, FileSystemLoader
STATUS_BUCKET = 'oss-fuzz-build-logs'
LOGS_BUCKET = 'oss-fuzz-gcb-logs'
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
def usage():
sys.stderr.write(
"Usage: " + sys.argv[0] + " <projects_dir>\n")
exit(1)
def scan_project_names(projects_dir):
projects = []
for root, dirs, files in os.walk(projects_dir):
for f in files:
if f == "Dockerfile":
projects.append(os.path.basename(root))
return sorted(projects)
def upload_status(successes, failures):
"""Upload main status page."""
env = Environment(loader=FileSystemLoader(os.path.join(SCRIPT_DIR,
'templates')))
data = {
'projects': failures + successes,
'failures': failures,
'successes': successes,
'last_updated': datetime.datetime.utcnow().ctime()
}
storage_client = storage.Client()
bucket = storage_client.get_bucket(STATUS_BUCKET)
blob = bucket.blob('status.html')
blob.cache_control = 'no-cache'
blob.upload_from_string(
env.get_template('status_template.html').render(data),
content_type='text/html')
blob = bucket.blob('status.json')
blob.cache_control = 'no-cache'
blob.upload_from_string(
json.dumps(data),
content_type='application/json')
def is_build_successful(build):
return build['status'] == 'SUCCESS'
def find_last_build(builds):
DELAY_MINUTES = 40
for build in builds:
if build['status'] == 'WORKING':
continue
finish_time = dateutil.parser.parse(build['finishTime'], ignoretz=True)
if (datetime.datetime.utcnow() - finish_time >=
datetime.timedelta(minutes=DELAY_MINUTES)):
storage_client = storage.Client()
status_bucket = storage_client.get_bucket(STATUS_BUCKET)
gcb_bucket = storage_client.get_bucket(LOGS_BUCKET)
log_name = 'log-{0}.txt'.format(build['id'])
log = gcb_bucket.blob(log_name)
dest_log = status_bucket.blob(log_name)
with tempfile.NamedTemporaryFile() as f:
log.download_to_filename(f.name)
dest_log.upload_from_filename(f.name, content_type='text/plain')
return build
return None
def main():
if len(sys.argv) != 2:
usage()
projects_dir = sys.argv[1]
credentials = GoogleCredentials.get_application_default()
cloudbuild = gcb_build('cloudbuild', 'v1', credentials=credentials)
successes = []
failures = []
for project in scan_project_names(projects_dir):
print project
query_filter = ('images="gcr.io/oss-fuzz/{0}"'.format(project))
try:
response = cloudbuild.projects().builds().list(
projectId='oss-fuzz',
pageSize=2,
filter=query_filter).execute()
except googleapiclient.errors.HttpError:
print >>sys.stderr, 'Failed to list builds for', project
continue
if not 'builds' in response:
continue
builds = response['builds']
last_build = find_last_build(builds)
if not last_build:
print >>sys.stderr, 'Failed to get build for', project
continue
print last_build['startTime'], last_build['status'], last_build['id']
if is_build_successful(last_build):
successes.append({
'name': project,
'build_id': last_build['id'],
'finish_time': last_build['finishTime'],
'success': True,
})
else:
failures.append({
'name': project,
'build_id': last_build['id'],
'finish_time': last_build['finishTime'],
'success': False,
})
upload_status(successes, failures)
if __name__ == "__main__":
main()
| apache-2.0 |
liguangsheng/shadowsocks | shadowsocks/tcprelay.py | 922 | 28870 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, shell, common
from shadowsocks.common import parse_header
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
MSG_FASTOPEN = 0x20000000
# SOCKS command definition
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
# local: connected to the client
# remote: connected to remote server
# for each handler, it could be at one of several stages:
# as sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# as ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
# for each handler, we have 2 stream directions:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
STREAM_UP = 0
STREAM_DOWN = 1
# for each stream, it's waiting for reading, or writing, or both
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
BUF_SIZE = 32 * 1024
class TCPRelayHandler(object):
def __init__(self, server, fd_to_handlers, loop, local_sock, config,
dns_resolver, is_local):
self._server = server
self._fd_to_handlers = fd_to_handlers
self._loop = loop
self._local_sock = local_sock
self._remote_sock = None
self._config = config
self._dns_resolver = dns_resolver
# TCP Relay works as either sslocal or ssserver
# if is_local, this is sslocal
self._is_local = is_local
self._stage = STAGE_INIT
self._encryptor = encrypt.Encryptor(config['password'],
config['method'])
self._fastopen_connected = False
self._data_to_write_to_local = []
self._data_to_write_to_remote = []
self._upstream_status = WAIT_STATUS_READING
self._downstream_status = WAIT_STATUS_INIT
self._client_address = local_sock.getpeername()[:2]
self._remote_address = None
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
if is_local:
self._chosen_server = self._get_a_server()
fd_to_handlers[local_sock.fileno()] = self
local_sock.setblocking(False)
local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR,
self._server)
self.last_activity = 0
self._update_activity()
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def remote_address(self):
return self._remote_address
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _update_activity(self, data_len=0):
# tell the TCP Relay we have activities recently
# else it will think we are inactive and timed out
self._server.update_activity(self, data_len)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
def _write_to_sock(self, data, sock):
# write data to sock
# if only some of the data are written, put remaining in the buffer
# and update the stream to wait for writing
if not data or not sock:
return False
uncomplete = False
try:
l = len(data)
s = sock.send(data)
if s < l:
data = data[s:]
uncomplete = True
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
uncomplete = True
else:
shell.print_exception(e)
self.destroy()
return False
if uncomplete:
if sock == self._local_sock:
self._data_to_write_to_local.append(data)
self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
elif sock == self._remote_sock:
self._data_to_write_to_remote.append(data)
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
else:
logging.error('write_all_to_sock:unknown socket')
else:
if sock == self._local_sock:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
elif sock == self._remote_sock:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
else:
logging.error('write_all_to_sock:unknown socket')
return True
def _handle_stage_connecting(self, data):
if self._is_local:
data = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data)
if self._is_local and not self._fastopen_connected and \
self._config['fast_open']:
# for sslocal and fastopen, we basically wait for data and use
# sendto to connect
try:
# only connect once
self._fastopen_connected = True
remote_sock = \
self._create_remote_socket(self._chosen_server[0],
self._chosen_server[1])
self._loop.add(remote_sock, eventloop.POLL_ERR, self._server)
data = b''.join(self._data_to_write_to_remote)
l = len(data)
s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
if s < l:
data = data[s:]
self._data_to_write_to_remote = [data]
else:
self._data_to_write_to_remote = []
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
# in this case data is not sent at all
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
logging.error('fast open not supported on this OS')
self._config['fast_open'] = False
self.destroy()
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _handle_stage_addr(self, data):
try:
if self._is_local:
cmd = common.ord(data[1])
if cmd == CMD_UDP_ASSOCIATE:
logging.debug('UDP associate')
if self._local_sock.family == socket.AF_INET6:
header = b'\x05\x00\x00\x04'
else:
header = b'\x05\x00\x00\x01'
addr, port = self._local_sock.getsockname()[:2]
addr_to_send = socket.inet_pton(self._local_sock.family,
addr)
port_to_send = struct.pack('>H', port)
self._write_to_sock(header + addr_to_send + port_to_send,
self._local_sock)
self._stage = STAGE_UDP_ASSOC
# just wait for the client to disconnect
return
elif cmd == CMD_CONNECT:
# just trim VER CMD RSV
data = data[3:]
else:
logging.error('unknown command %d', cmd)
self.destroy()
return
header_result = parse_header(data)
if header_result is None:
raise Exception('can not parse header')
addrtype, remote_addr, remote_port, header_length = header_result
logging.info('connecting %s:%d from %s:%d' %
(common.to_str(remote_addr), remote_port,
self._client_address[0], self._client_address[1]))
self._remote_address = (common.to_str(remote_addr), remote_port)
# pause reading
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
self._stage = STAGE_DNS
if self._is_local:
# forward address to remote
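                # this is the SOCKS5 success reply sent back to the client;
                # the bound address is a dummy value (0.0.0.0:4112) since the
                # real connection to the server happens asynchronously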
self._write_to_sock((b'\x05\x00\x00\x01'
b'\x00\x00\x00\x00\x10\x10'),
self._local_sock)
data_to_send = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data_to_send)
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(self._chosen_server[0],
self._handle_dns_resolved)
else:
if len(data) > header_length:
self._data_to_write_to_remote.append(data[header_length:])
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(remote_addr,
self._handle_dns_resolved)
except Exception as e:
self._log_error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
raise Exception('IP %s is in forbidden list, reject' %
common.to_str(sa[0]))
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
def _handle_dns_resolved(self, result, error):
if error:
self._log_error(error)
self.destroy()
return
if result:
ip = result[1]
if ip:
try:
self._stage = STAGE_CONNECTING
remote_addr = ip
if self._is_local:
remote_port = self._chosen_server[1]
else:
remote_port = self._remote_address[1]
if self._is_local and self._config['fast_open']:
# for fastopen:
# wait for more data to arrive and send them in one SYN
self._stage = STAGE_CONNECTING
# we don't have to wait for remote since it's not
# created
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
# TODO when there is already data in this packet
else:
# else do connect
remote_sock = self._create_remote_socket(remote_addr,
remote_port)
try:
remote_sock.connect((remote_addr, remote_port))
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == \
errno.EINPROGRESS:
pass
self._loop.add(remote_sock,
eventloop.POLL_ERR | eventloop.POLL_OUT,
self._server)
self._stage = STAGE_CONNECTING
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
return
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _on_local_read(self):
# handle all local read events and dispatch them to methods for
# each stage
if not self._local_sock:
return
is_local = self._is_local
data = None
try:
data = self._local_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if not is_local:
data = self._encryptor.decrypt(data)
if not data:
return
if self._stage == STAGE_STREAM:
if self._is_local:
data = self._encryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
return
elif is_local and self._stage == STAGE_INIT:
# TODO check auth method
self._write_to_sock(b'\x05\00', self._local_sock)
self._stage = STAGE_ADDR
return
elif self._stage == STAGE_CONNECTING:
self._handle_stage_connecting(data)
elif (is_local and self._stage == STAGE_ADDR) or \
(not is_local and self._stage == STAGE_INIT):
self._handle_stage_addr(data)
def _on_remote_read(self):
# handle all remote read events
data = None
try:
data = self._remote_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if self._is_local:
data = self._encryptor.decrypt(data)
else:
data = self._encryptor.encrypt(data)
try:
self._write_to_sock(data, self._local_sock)
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
def handle_event(self, sock, event):
# handle all events in this handler and dispatch them to methods
if self._stage == STAGE_DESTROYED:
logging.debug('ignore handle_event: destroyed')
return
# order is important
if sock == self._remote_sock:
if event & eventloop.POLL_ERR:
self._on_remote_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_remote_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_remote_write()
elif sock == self._local_sock:
if event & eventloop.POLL_ERR:
self._on_local_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_local_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_local_write()
else:
logging.warn('unknown socket')
def _log_error(self, e):
logging.error('%s when handling connection from %s:%d' %
(e, self._client_address[0], self._client_address[1]))
def destroy(self):
# destroy the handler and release any resources
# promises:
# 1. destroy won't make another destroy() call inside
# 2. destroy releases resources so it prevents future call to destroy
# 3. destroy won't raise any exceptions
# if any of the promises are broken, it indicates a bug has been
        # introduced! most likely memory leaks, etc
if self._stage == STAGE_DESTROYED:
# this couldn't happen
logging.debug('already destroyed')
return
self._stage = STAGE_DESTROYED
if self._remote_address:
logging.debug('destroy: %s:%d' %
self._remote_address)
else:
logging.debug('destroy')
if self._remote_sock:
logging.debug('destroying remote')
self._loop.remove(self._remote_sock)
del self._fd_to_handlers[self._remote_sock.fileno()]
self._remote_sock.close()
self._remote_sock = None
if self._local_sock:
logging.debug('destroying local')
self._loop.remove(self._local_sock)
del self._fd_to_handlers[self._local_sock.fileno()]
self._local_sock.close()
self._local_sock = None
self._dns_resolver.remove_callback(self._handle_dns_resolved)
self._server.remove_handler(self)
class TCPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
self._is_local = is_local
self._dns_resolver = dns_resolver
self._closed = False
self._eventloop = None
self._fd_to_handlers = {}
self._timeout = config['timeout']
self._timeouts = [] # a list for all the handlers
# we trim the timeouts once a while
self._timeout_offset = 0 # last checked position for timeout
self._handler_to_timeouts = {} # key: handler value: index in timeouts
if is_local:
listen_addr = config['local_address']
listen_port = config['local_port']
else:
listen_addr = config['server']
listen_port = config['server_port']
self._listen_port = listen_port
addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(listen_addr, listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(sa)
server_socket.setblocking(False)
if config['fast_open']:
try:
server_socket.setsockopt(socket.SOL_TCP, 23, 5)
except socket.error:
logging.error('warning: fast open is not available')
self._config['fast_open'] = False
server_socket.listen(1024)
self._server_socket = server_socket
self._stat_callback = stat_callback
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
self._eventloop.add(self._server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
self._eventloop.add_periodic(self.handle_periodic)
def remove_handler(self, handler):
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
del self._handler_to_timeouts[hash(handler)]
def update_activity(self, handler, data_len):
if data_len and self._stat_callback:
self._stat_callback(self._listen_port, data_len)
# set handler to active
now = int(time.time())
if now - handler.last_activity < eventloop.TIMEOUT_PRECISION:
# thus we can lower timeout modification frequency
return
handler.last_activity = now
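        # lazy update: null out the old slot and re-append the handler at the
        # tail, so self._timeouts stays ordered by last_activity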
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
length = len(self._timeouts)
self._timeouts.append(handler)
self._handler_to_timeouts[hash(handler)] = length
def _sweep_timeout(self):
# tornado's timeout memory management is more flexible than we need
# we just need a sorted last_activity queue and it's faster than heapq
# in fact we can do O(1) insertion/remove so we invent our own
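        # slots before self._timeout_offset are already None (swept or moved),
        # so each sweep resumes from where the previous one stopped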
if self._timeouts:
logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
now = time.time()
length = len(self._timeouts)
pos = self._timeout_offset
while pos < length:
handler = self._timeouts[pos]
if handler:
if now - handler.last_activity < self._timeout:
break
else:
if handler.remote_address:
logging.warn('timed out: %s:%d' %
handler.remote_address)
else:
logging.warn('timed out')
handler.destroy()
self._timeouts[pos] = None # free memory
pos += 1
else:
pos += 1
if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
# clean up the timeout queue when it gets larger than half
# of the queue
self._timeouts = self._timeouts[pos:]
for key in self._handler_to_timeouts:
self._handler_to_timeouts[key] -= pos
pos = 0
self._timeout_offset = pos
def handle_event(self, sock, fd, event):
# handle events and dispatch to handlers
if sock:
logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
eventloop.EVENT_NAMES.get(event, event))
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
# TODO
raise Exception('server_socket error')
try:
logging.debug('accept')
conn = self._server_socket.accept()
TCPRelayHandler(self, self._fd_to_handlers,
self._eventloop, conn[0], self._config,
self._dns_resolver, self._is_local)
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
else:
if sock:
handler = self._fd_to_handlers.get(fd, None)
if handler:
handler.handle_event(sock, event)
else:
logging.warn('poll removed fd')
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._eventloop.remove(self._server_socket)
self._server_socket.close()
self._server_socket = None
logging.info('closed TCP port %d', self._listen_port)
if not self._fd_to_handlers:
logging.info('stopping')
self._eventloop.stop()
self._sweep_timeout()
def close(self, next_tick=False):
logging.debug('TCP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for handler in list(self._fd_to_handlers.values()):
handler.destroy()
| apache-2.0 |
hastexo/edx-platform | common/djangoapps/django_comment_common/models.py | 1 | 7716 | import json
import logging
from config_models.models import ConfigurationModel
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_noop
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField, NoneToEmptyManager
from six import text_type
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
FORUM_ROLE_ADMINISTRATOR = ugettext_noop('Administrator')
FORUM_ROLE_MODERATOR = ugettext_noop('Moderator')
FORUM_ROLE_GROUP_MODERATOR = ugettext_noop('Group Moderator')
FORUM_ROLE_COMMUNITY_TA = ugettext_noop('Community TA')
FORUM_ROLE_STUDENT = ugettext_noop('Student')
@receiver(post_save, sender=CourseEnrollment)
def assign_default_role_on_enrollment(sender, instance, **kwargs):
"""
Assign forum default role 'Student'
"""
# The code below would remove all forum Roles from a user when they unenroll
# from a course. Concerns were raised that it should apply only to students,
# or that even the history of student roles is important for research
# purposes. Since this was new functionality being added in this release,
# I'm just going to comment it out for now and let the forums team deal with
# implementing the right behavior.
#
# # We've unenrolled the student, so remove all roles for this course
# if not instance.is_active:
# course_roles = list(Role.objects.filter(course_id=instance.course_id))
# instance.user.roles.remove(*course_roles)
# return
# We've enrolled the student, so make sure they have the Student role
assign_default_role(instance.course_id, instance.user)
def assign_default_role(course_id, user):
"""
Assign forum default role 'Student' to user
"""
assign_role(course_id, user, FORUM_ROLE_STUDENT)
def assign_role(course_id, user, rolename):
"""
Assign forum role `rolename` to user
"""
role, created = Role.objects.get_or_create(course_id=course_id, name=rolename)
if created:
logging.info("EDUCATOR-1635: Created role {} for course {}".format(role, course_id))
user.roles.add(role)
class Role(models.Model):
objects = NoneToEmptyManager()
name = models.CharField(max_length=30, null=False, blank=False)
users = models.ManyToManyField(User, related_name="roles")
course_id = CourseKeyField(max_length=255, blank=True, db_index=True)
class Meta(object):
# use existing table that was originally created from django_comment_client app
db_table = 'django_comment_client_role'
def __unicode__(self):
# pylint: disable=no-member
return self.name + " for " + (text_type(self.course_id) if self.course_id else "all courses")
# TODO the name of this method is a little bit confusing,
# since it's one-off and doesn't handle inheritance later
def inherit_permissions(self, role):
"""
Make this role inherit permissions from the given role.
Permissions are only added, not removed. Does not handle inheritance.
"""
if role.course_id and role.course_id != self.course_id:
logging.warning(
"%s cannot inherit permissions from %s due to course_id inconsistency",
self,
role,
)
for per in role.permissions.all():
self.add_permission(per)
def add_permission(self, permission):
self.permissions.add(Permission.objects.get_or_create(name=permission)[0])
def has_permission(self, permission):
"""Returns True if this role has the given permission, False otherwise."""
course = modulestore().get_course(self.course_id)
if course is None:
raise ItemNotFoundError(self.course_id)
if permission_blacked_out(course, {self.name}, permission):
return False
return self.permissions.filter(name=permission).exists()
class Permission(models.Model):
name = models.CharField(max_length=30, null=False, blank=False, primary_key=True)
roles = models.ManyToManyField(Role, related_name="permissions")
class Meta(object):
# use existing table that was originally created from django_comment_client app
db_table = 'django_comment_client_permission'
def __unicode__(self):
return self.name
def permission_blacked_out(course, role_names, permission_name):
"""Returns true if a user in course with the given roles would have permission_name blacked out.
This will return true if it is a permission that the user might have normally had for the course, but does not have
right this moment because we are in a discussion blackout period (as defined by the settings on the course module).
Namely, they can still view, but they can't edit, update, or create anything. This only applies to students, as
moderators of any kind still have posting privileges during discussion blackouts.
"""
return (
not course.forum_posts_allowed and
role_names == {FORUM_ROLE_STUDENT} and
any([permission_name.startswith(prefix) for prefix in ['edit', 'update', 'create']])
)
def all_permissions_for_user_in_course(user, course_id): # pylint: disable=invalid-name
"""Returns all the permissions the user has in the given course."""
if not user.is_authenticated():
return {}
course = modulestore().get_course(course_id)
if course is None:
raise ItemNotFoundError(course_id)
all_roles = {role.name for role in Role.objects.filter(users=user, course_id=course_id)}
permissions = {
permission.name
for permission
in Permission.objects.filter(roles__users=user, roles__course_id=course_id)
if not permission_blacked_out(course, all_roles, permission.name)
}
return permissions
class ForumsConfig(ConfigurationModel):
"""Config for the connection to the cs_comments_service forums backend."""
connection_timeout = models.FloatField(
default=5.0,
help_text="Seconds to wait when trying to connect to the comment service.",
)
@property
def api_key(self):
"""The API key used to authenticate to the comments service."""
return getattr(settings, "COMMENTS_SERVICE_KEY", None)
def __unicode__(self):
"""Simple representation so the admin screen looks less ugly."""
return u"ForumsConfig: timeout={}".format(self.connection_timeout)
class CourseDiscussionSettings(models.Model):
course_id = CourseKeyField(
unique=True,
max_length=255,
db_index=True,
help_text="Which course are these settings associated with?",
)
always_divide_inline_discussions = models.BooleanField(default=False)
_divided_discussions = models.TextField(db_column='divided_discussions', null=True, blank=True) # JSON list
COHORT = 'cohort'
ENROLLMENT_TRACK = 'enrollment_track'
NONE = 'none'
ASSIGNMENT_TYPE_CHOICES = ((NONE, 'None'), (COHORT, 'Cohort'), (ENROLLMENT_TRACK, 'Enrollment Track'))
division_scheme = models.CharField(max_length=20, choices=ASSIGNMENT_TYPE_CHOICES, default=NONE)
@property
def divided_discussions(self):
"""Jsonify the divided_discussions"""
return json.loads(self._divided_discussions)
@divided_discussions.setter
def divided_discussions(self, value):
"""Un-Jsonify the divided_discussions"""
self._divided_discussions = json.dumps(value)
| agpl-3.0 |
p/pycurl-archived | tests/internals_test.py | 2 | 6879 | #! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
from .util import StringIO
try:
import cPickle
except ImportError:
cPickle = None
import pickle
import gc
import copy
class InternalsTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
del self.curl
# /***********************************************************************
# // test misc
# ************************************************************************/
def test_constant_aliasing(self):
assert self.curl.URL is pycurl.URL
# /***********************************************************************
# // test handles
# ************************************************************************/
def test_remove_invalid_handle(self):
m = pycurl.CurlMulti()
try:
m.remove_handle(self.curl)
except pycurl.error:
pass
else:
assert False, "No exception when trying to remove a handle that is not in CurlMulti"
del m
def test_remove_invalid_closed_handle(self):
m = pycurl.CurlMulti()
c = pycurl.Curl()
c.close()
m.remove_handle(c)
del m, c
def test_add_closed_handle(self):
m = pycurl.CurlMulti()
c = pycurl.Curl()
c.close()
try:
m.add_handle(c)
except pycurl.error:
pass
else:
assert 0, "No exception when trying to add a close handle to CurlMulti"
m.close()
del m, c
def test_add_handle_twice(self):
m = pycurl.CurlMulti()
m.add_handle(self.curl)
try:
m.add_handle(self.curl)
except pycurl.error:
pass
else:
assert 0, "No exception when trying to add the same handle twice"
del m
def test_add_handle_on_multiple_stacks(self):
m1 = pycurl.CurlMulti()
m2 = pycurl.CurlMulti()
m1.add_handle(self.curl)
try:
m2.add_handle(self.curl)
except pycurl.error:
pass
else:
assert 0, "No exception when trying to add the same handle on multiple stacks"
del m1, m2
def test_move_handle(self):
m1 = pycurl.CurlMulti()
m2 = pycurl.CurlMulti()
m1.add_handle(self.curl)
m1.remove_handle(self.curl)
m2.add_handle(self.curl)
del m1, m2
# /***********************************************************************
# // test copying and pickling - copying and pickling of
# // instances of Curl and CurlMulti is not allowed
# ************************************************************************/
def test_copy_curl(self):
try:
copy.copy(self.curl)
# python 2 raises copy.Error, python 3 raises TypeError
except (copy.Error, TypeError):
pass
else:
assert False, "No exception when trying to copy a Curl handle"
def test_copy_multi(self):
m = pycurl.CurlMulti()
try:
copy.copy(m)
except (copy.Error, TypeError):
pass
else:
assert False, "No exception when trying to copy a CurlMulti handle"
def test_pickle_curl(self):
fp = StringIO()
p = pickle.Pickler(fp, 1)
try:
p.dump(self.curl)
# python 2 raises pickle.PicklingError, python 3 raises TypeError
except (pickle.PicklingError, TypeError):
pass
else:
assert 0, "No exception when trying to pickle a Curl handle"
del fp, p
def test_pickle_multi(self):
m = pycurl.CurlMulti()
fp = StringIO()
p = pickle.Pickler(fp, 1)
try:
p.dump(m)
except (pickle.PicklingError, TypeError):
pass
else:
assert 0, "No exception when trying to pickle a CurlMulti handle"
del m, fp, p
if cPickle is not None:
def test_cpickle_curl(self):
fp = StringIO()
p = cPickle.Pickler(fp, 1)
try:
p.dump(self.curl)
except cPickle.PicklingError:
pass
else:
assert 0, "No exception when trying to pickle a Curl handle via cPickle"
del fp, p
def test_cpickle_multi(self):
m = pycurl.CurlMulti()
fp = StringIO()
p = cPickle.Pickler(fp, 1)
try:
p.dump(m)
except cPickle.PicklingError:
pass
else:
assert 0, "No exception when trying to pickle a CurlMulti handle via cPickle"
del m, fp, p
# /***********************************************************************
# // test refcounts
# ************************************************************************/
# basic check of reference counting (use a memory checker like valgrind)
def test_reference_counting(self):
c = pycurl.Curl()
m = pycurl.CurlMulti()
m.add_handle(c)
del m
m = pycurl.CurlMulti()
c.close()
del m, c
def test_cyclic_gc(self):
gc.collect()
c = pycurl.Curl()
c.m = pycurl.CurlMulti()
c.m.add_handle(c)
# create some nasty cyclic references
c.c = c
c.c.c1 = c
c.c.c2 = c
c.c.c3 = c.c
c.c.c4 = c.m
c.m.c = c
c.m.m = c.m
c.m.c = c
# delete
gc.collect()
flags = gc.DEBUG_COLLECTABLE | gc.DEBUG_UNCOLLECTABLE
# python 3 has no DEBUG_OBJECTS
#if hasattr(gc, 'DEBUG_OBJECTS'):
#flags |= gc.DEBUG_OBJECTS
#if opts.verbose >= 1:
#flags = flags | gc.DEBUG_STATS
gc.set_debug(flags)
gc.collect()
##print gc.get_referrers(c)
##print gc.get_objects()
#if opts.verbose >= 1:
#print("Tracked objects:", len(gc.get_objects()))
c_id = id(c)
# The `del' below should delete these 4 objects:
# Curl + internal dict, CurlMulti + internal dict
del c
gc.collect()
objects = gc.get_objects()
for object in objects:
assert id(object) != c_id
#if opts.verbose >= 1:
#print("Tracked objects:", len(gc.get_objects()))
def test_refcounting_bug_in_reset(self):
try:
range_generator = xrange
except NameError:
range_generator = range
# Ensure that the refcounting error in "reset" is fixed:
for i in range_generator(100000):
c = pycurl.Curl()
c.reset()
| lgpl-2.1 |
CiuffysHub/MITMf | mitmflib-0.18.4/mitmflib/impacket/krb5/ccache.py | 2 | 17647 | # Copyright (c) 2003-2015 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino (@agsolino)
#
# Description:
# Kerberos Credential Cache format implementation
# based on file format described at:
# http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt
# Pretty lame and quick implementation, not a fun thing to do
# Contribution is welcome to make it the right way
#
from datetime import datetime
from struct import pack, unpack, calcsize
from pyasn1.codec.der import decoder, encoder
from binascii import hexlify
from mitmflib.impacket.structure import Structure
from mitmflib.impacket.krb5 import crypto, constants, types
from mitmflib.impacket.krb5.asn1 import AS_REP, seq_set, TGS_REP, EncTGSRepPart, EncASRepPart, Ticket
DELTA_TIME = 1
class Header(Structure):
structure = (
('tag','!H=0'),
('taglen','!H=0'),
('_tagdata','_-tagdata','self["taglen"]'),
('tagdata',':'),
)
class DeltaTime(Structure):
structure = (
('time_offset','!L=0'),
('usec_offset','!L=0'),
)
class CountedOctetString(Structure):
structure = (
('length','!L=0'),
('_data','_-data','self["length"]'),
('data',':'),
)
def prettyPrint(self, indent=''):
return "%s%s" % (indent, hexlify(self['data']))
class KeyBlock(Structure):
structure = (
('keytype','!H=0'),
('etype','!H=0'),
('keylen','!H=0'),
('_keyvalue','_-keyvalue','self["keylen"]'),
('keyvalue',':'),
)
def prettyPrint(self):
return "Key: (0x%x)%s" % (self['keytype'], hexlify(self['keyvalue']))
class Times(Structure):
structure = (
('authtime','!L=0'),
('starttime','!L=0'),
('endtime','!L=0'),
('renew_till','!L=0'),
)
def prettyPrint(self, indent = ''):
print "%sAuth : %s" % (indent, datetime.fromtimestamp(self['authtime']).isoformat())
print "%sStart: %s" % (indent, datetime.fromtimestamp(self['starttime']).isoformat())
print "%sEnd : %s" % (indent, datetime.fromtimestamp(self['endtime']).isoformat())
print "%sRenew: %s" % (indent, datetime.fromtimestamp(self['renew_till']).isoformat())
class Address(Structure):
structure = (
('addrtype','!H=0'),
('addrdata',':', CountedOctetString),
)
class AuthData(Structure):
structure = (
('authtype','!H=0'),
('authdata',':', CountedOctetString),
)
class Principal():
class PrincipalHeader(Structure):
structure = (
('name_type','!L=0'),
('num_components','!L=0'),
)
components = []
realm = None
def __init__(self, data=None):
if data is not None:
self.header = self.PrincipalHeader(data)
data = data[len(self.header):]
self.realm = CountedOctetString(data)
data = data[len(self.realm):]
self.components = []
for component in range(self.header['num_components']):
comp = CountedOctetString(data)
data = data[len(comp):]
self.components.append(comp)
else:
self.header = self.PrincipalHeader()
def __len__(self):
totalLen = len(self.header) + len(self.realm)
for i in self.components:
totalLen += len(i)
return totalLen
def getData(self):
data = self.header.getData() + self.realm.getData()
for component in self.components:
data += component.getData()
return data
def __str__(self):
return self.getData()
def prettyPrint(self):
principal = ''
for component in self.components:
principal += component['data'] + '/'
principal = principal[:-1]
principal += '@' + self.realm['data']
return principal
def fromPrincipal(self, principal):
self.header['name_type'] = principal.type
self.header['num_components'] = len(principal.components)
octetString = CountedOctetString()
octetString['length'] = len(principal.realm)
octetString['data'] = principal.realm
self.realm = octetString
self.components = []
for c in principal.components:
octetString = CountedOctetString()
octetString['length'] = len(c)
octetString['data'] = c
self.components.append(octetString)
def toPrincipal(self):
return types.Principal(self.prettyPrint(), type=self.header['name_type'])
class Credential():
class CredentialHeader(Structure):
structure = (
('client',':', Principal),
('server',':', Principal),
('key',':', KeyBlock),
('time',':', Times),
('is_skey','B=0'),
('tktflags','!L=0'),
('num_address','!L=0'),
)
addresses = ()
authData = ()
header = None
ticket = None
secondTicket = None
def __init__(self, data=None):
if data is not None:
self.header = self.CredentialHeader(data)
data = data[len(self.header):]
self.addresses = []
for address in range(self.header['num_address']):
ad = Address(data)
data = data[len(ad):]
self.addresses.append(ad)
num_authdata = unpack('!L', data[:4])[0]
data = data[calcsize('!L'):]
for authdata in range(num_authdata):
ad = AuthData(data)
data = data[len(ad):]
self.authData.append(ad)
self.ticket = CountedOctetString(data)
data = data[len(self.ticket):]
self.secondTicket = CountedOctetString(data)
data = data[len( self.secondTicket):]
else:
self.header = self.CredentialHeader()
def __getitem__(self, key):
return self.header[key]
def __setitem__(self, item, value):
self.header[item] = value
def getServerPrincipal(self):
return self.header['server'].prettyPrint()
def __len__(self):
totalLen = len(self.header)
for i in self.addresses:
totalLen += len(i)
totalLen += calcsize('!L')
for i in self.authData:
totalLen += len(i)
totalLen += len(self.ticket)
totalLen += len(self.secondTicket)
return totalLen
def dump(self):
self.header.dump()
def getData(self):
data = self.header.getData()
for i in self.addresses:
data += i.getData()
data += pack('!L', len(self.authData))
for i in self.authData:
data += i.getData()
data += self.ticket.getData()
data += self.secondTicket.getData()
return data
def __str__(self):
return self.getData()
def prettyPrint(self, indent=''):
print "%sClient: %s" % (indent, self.header['client'].prettyPrint())
print "%sServer: %s" % (indent, self.header['server'].prettyPrint())
print "%s%s" % (indent, self.header['key'].prettyPrint())
print "%sTimes: " % indent
self.header['time'].prettyPrint('\t\t')
print "%sSubKey: %s" % (indent, self.header['is_skey'])
print "%sFlags: 0x%x" % (indent, self.header['tktflags'])
print "%sAddresses: %d" % (indent, self.header['num_address'])
for address in self.addresses:
address.prettyPrint('\t\t')
print "%sAuth Data: %d" % (indent, len(self.authData))
for ad in self.authData:
ad.prettyPrint('\t\t')
print "%sTicket: %s" % (indent, self.ticket.prettyPrint())
print "%sSecond Ticket: %s" % (indent, self.secondTicket.prettyPrint())
def toTGT(self):
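        # build a fake AS-REP wrapper around the cached ticket; the enc-part
        # is left empty because the session key is already stored in clear in
        # the ccache and is returned separately in the resulting dict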
tgt_rep = AS_REP()
tgt_rep['pvno'] = 5
        tgt_rep['msg-type'] = int(constants.ApplicationTagNumbers.AS_REP.value)
tgt_rep['crealm'] = self['server'].realm['data']
# Fake EncryptedData
tgt_rep['enc-part'] = None
tgt_rep['enc-part']['etype'] = 1
tgt_rep['enc-part']['cipher'] = ''
seq_set(tgt_rep, 'cname', self['client'].toPrincipal().components_to_asn1)
ticket = types.Ticket()
ticket.from_asn1(self.ticket['data'])
seq_set(tgt_rep,'ticket', ticket.to_asn1)
cipher = crypto._enctype_table[self['key']['keytype']]()
tgt = dict()
tgt['KDC_REP'] = encoder.encode(tgt_rep)
tgt['cipher'] = cipher
tgt['sessionKey'] = crypto.Key(cipher.enctype, str(self['key']['keyvalue']))
return tgt
def toTGS(self):
tgs_rep = TGS_REP()
tgs_rep['pvno'] = 5
tgs_rep['msg-type'] = int(constants.ApplicationTagNumbers.TGS_REP.value)
tgs_rep['crealm'] = self['server'].realm['data']
# Fake EncryptedData
tgs_rep['enc-part'] = None
tgs_rep['enc-part']['etype'] = 1
tgs_rep['enc-part']['cipher'] = ''
seq_set(tgs_rep, 'cname', self['client'].toPrincipal().components_to_asn1)
ticket = types.Ticket()
ticket.from_asn1(self.ticket['data'])
seq_set(tgs_rep,'ticket', ticket.to_asn1)
cipher = crypto._enctype_table[self['key']['keytype']]()
tgs = dict()
tgs['KDC_REP'] = encoder.encode(tgs_rep)
tgs['cipher'] = cipher
tgs['sessionKey'] = crypto.Key(cipher.enctype, str(self['key']['keyvalue']))
return tgs
class CCache():
headers = None
principal = None
credentials = []
miniHeader = None
class MiniHeader(Structure):
structure = (
('file_format_version','!H=0x0504'),
('headerlen','!H=12'),
)
def __init__(self, data = None):
if data is not None:
miniHeader = self.MiniHeader(data)
data = data[len(str(miniHeader)):]
headerLen = miniHeader['headerlen']
self.headers = []
while headerLen > 0:
header = Header(data)
self.headers.append(header)
headerLen -= len(header)
data = data[len(header):]
# Now the primary_principal
self.principal = Principal(data)
data = data[len(self.principal):]
# Now let's parse the credentials
self.credentials = []
while len(data) > 0:
cred = Credential(data)
self.credentials.append(cred)
data = data[len(cred.getData()):]
def getData(self):
data = self.MiniHeader().getData()
for header in self.headers:
data += header.getData()
data += self.principal.getData()
for credential in self.credentials:
data += credential.getData()
return data
def getCredential(self, server):
for c in self.credentials:
if c['server'].prettyPrint().upper() == server.upper():
return c
return None
def toTimeStamp(self, dt, epoch=datetime(1970,1,1)):
td = dt - epoch
# return td.total_seconds()
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 1e6
def reverseFlags(self, flags):
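        # convert the ASN.1 ticket-flags bit sequence into the integer form
        # used by the ccache tktflags field (bit order reversed)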
result = 0
if isinstance(flags, str):
flags = flags[1:-2]
for i,j in enumerate(reversed(flags)):
if j != 0:
result += j << i
return result
def fromTGT(self, tgt, oldSessionKey, sessionKey):
self.headers = []
header = Header()
header['tag'] = 1
header['taglen'] = 8
header['tagdata'] = '\xff\xff\xff\xff\x00\x00\x00\x00'
self.headers.append(header)
decodedTGT = decoder.decode(tgt, asn1Spec = AS_REP())[0]
tmpPrincipal = types.Principal()
tmpPrincipal.from_asn1(decodedTGT, 'crealm', 'cname')
self.principal = Principal()
self.principal.fromPrincipal(tmpPrincipal)
# Now let's add the credential
cipherText = decodedTGT['enc-part']['cipher']
cipher = crypto._enctype_table[decodedTGT['enc-part']['etype']]
# Key Usage 3
# AS-REP encrypted part (includes TGS session key or
# application session key), encrypted with the client key
# (Section 5.4.2)
plainText = cipher.decrypt(oldSessionKey, 3, str(cipherText))
encASRepPart = decoder.decode(plainText, asn1Spec = EncASRepPart())[0]
credential = Credential()
server = types.Principal()
server.from_asn1(encASRepPart, 'srealm', 'sname')
tmpServer = Principal()
tmpServer.fromPrincipal(server)
credential['client'] = self.principal
credential['server'] = tmpServer
credential['is_skey'] = 0
credential['key'] = KeyBlock()
credential['key']['keytype'] = int(encASRepPart['key']['keytype'])
credential['key']['keyvalue'] = str(encASRepPart['key']['keyvalue'])
credential['key']['keylen'] = len(credential['key']['keyvalue'])
credential['time'] = Times()
credential['time']['authtime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encASRepPart['authtime']))
credential['time']['starttime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encASRepPart['starttime']))
credential['time']['endtime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encASRepPart['endtime']))
credential['time']['renew_till'] = self.toTimeStamp(types.KerberosTime.from_asn1(encASRepPart['renew-till']))
flags = self.reverseFlags(encASRepPart['flags'])
credential['tktflags'] = flags
credential['num_address'] = 0
credential.ticket = CountedOctetString()
credential.ticket['data'] = encoder.encode(decodedTGT['ticket'].clone(tagSet=Ticket.tagSet, cloneValueFlag=True))
credential.ticket['length'] = len(credential.ticket['data'])
credential.secondTicket = CountedOctetString()
credential.secondTicket['data'] = ''
credential.secondTicket['length'] = 0
self.credentials.append(credential)
def fromTGS(self, tgs, oldSessionKey, sessionKey):
self.headers = []
header = Header()
header['tag'] = 1
header['taglen'] = 8
header['tagdata'] = '\xff\xff\xff\xff\x00\x00\x00\x00'
self.headers.append(header)
decodedTGS = decoder.decode(tgs, asn1Spec = TGS_REP())[0]
tmpPrincipal = types.Principal()
tmpPrincipal.from_asn1(decodedTGS, 'crealm', 'cname')
self.principal = Principal()
self.principal.fromPrincipal(tmpPrincipal)
# Now let's add the credential
cipherText = decodedTGS['enc-part']['cipher']
cipher = crypto._enctype_table[decodedTGS['enc-part']['etype']]
# Key Usage 8
# TGS-REP encrypted part (includes application session
# key), encrypted with the TGS session key (Section 5.4.2)
plainText = cipher.decrypt(oldSessionKey, 8, str(cipherText))
encTGSRepPart = decoder.decode(plainText, asn1Spec = EncTGSRepPart())[0]
credential = Credential()
server = types.Principal()
server.from_asn1(encTGSRepPart, 'srealm', 'sname')
tmpServer = Principal()
tmpServer.fromPrincipal(server)
credential['client'] = self.principal
credential['server'] = tmpServer
credential['is_skey'] = 0
credential['key'] = KeyBlock()
credential['key']['keytype'] = int(encTGSRepPart['key']['keytype'])
credential['key']['keyvalue'] = str(encTGSRepPart['key']['keyvalue'])
credential['key']['keylen'] = len(credential['key']['keyvalue'])
credential['time'] = Times()
credential['time']['authtime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encTGSRepPart['authtime']))
credential['time']['starttime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encTGSRepPart['starttime']))
credential['time']['endtime'] = self.toTimeStamp(types.KerberosTime.from_asn1(encTGSRepPart['endtime']))
credential['time']['renew_till'] = self.toTimeStamp(types.KerberosTime.from_asn1(encTGSRepPart['renew-till']))
flags = self.reverseFlags(encTGSRepPart['flags'])
credential['tktflags'] = flags
credential['num_address'] = 0
credential.ticket = CountedOctetString()
credential.ticket['data'] = encoder.encode(decodedTGS['ticket'].clone(tagSet=Ticket.tagSet, cloneValueFlag=True))
credential.ticket['length'] = len(credential.ticket['data'])
credential.secondTicket = CountedOctetString()
credential.secondTicket['data'] = ''
credential.secondTicket['length'] = 0
self.credentials.append(credential)
@classmethod
def loadFile(cls, fileName):
f = open(fileName,'rb')
data = f.read()
f.close()
return cls(data)
def saveFile(self, fileName):
f = open(fileName,'wb+')
f.write(self.getData())
f.close()
def prettyPrint(self):
print "Primary Principal: %s" % self.principal.prettyPrint()
print "Credentials: "
for i, credential in enumerate(self.credentials):
print "[%d]" % i
credential.prettyPrint('\t')
if __name__ == '__main__':
import os
ccache = CCache.loadFile(os.getenv('KRB5CCNAME'))
ccache.prettyPrint()
| gpl-3.0 |
TeamNyx/external_gtest | test/gtest_uninitialized_test.py | 2901 | 2480 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
if not condition:
raise AssertionError
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
# Verifies that 'command' exits with code 1.
p = gtest_test_utils.Subprocess(command)
Assert(p.exited)
AssertEq(1, p.exit_code)
Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
shlomif/rhizi | src/local/server-tools/rz-user-tool.py | 4 | 7072 | #!/usr/bin/python2.7
# This file is part of rhizi, a collaborative knowledge graph editor.
# Copyright (C) 2014-2015 Rhizi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os
import pickle
import pwd
import re
import sys
from getpass import getpass
server_path = None
for parts in [('..', '..', 'server'), ('..', '..', 'rhizi-server')]:
candidate = os.path.realpath(os.path.join(os.path.dirname(sys.modules[__name__].__file__), *parts))
if os.path.exists(candidate):
server_path = candidate
break
if server_path is None:
print("error: path not setup correctly")
raise SystemExit
sys.path.append(server_path)
from crypt_util import hash_pw
from rz_server import init_config
from rz_user import User_Account
from rz_user_db import User_DB
def add_user_login(user_db, salt, first_name, last_name,
rz_username, email_address, pw_plaintext):
pw_hash = hash_pw(str(pw_plaintext), salt)
u_account = User_Account(first_name=first_name,
last_name=last_name,
rz_username=rz_username,
email_address=email_address,
pw_hash=pw_hash,
role_set=['user'])
user_db.user_add(u_account)
def init_pw_db(cfg, user_pw_list_file, user_db_path, ugid_str='www-data'):
"""
@param ugid_str: shared uid, gid set on generated file
"""
if os.path.exists(user_db_path):
print('user_db_path already exists, aborting: ' + user_db_path)
return
user_db = User_DB(db_path=user_db_path)
user_db.init(mode='n') # always create a new, empty database, open for reading and writing
    salt = cfg.secret_key
    assert len(salt) > 8, 'server-key not found or too short'
print('using config secret_key for salt generation: ' + salt[:3] + '...')
u_count = 0
with open(user_pw_list_file, 'r') as f:
for line in f:
if re.match('(^#)|(\s+$)', line):
continue
kv_arr = line.split(',')
if 5 != len(kv_arr):
raise Exception('failed to parse first-name,last-name,email,user,pw line: ' + line)
first_name, last_name, rz_username, email_address, pw_plaintext = map(str.strip, kv_arr)
add_user_login(user_db=user_db,
                           salt=cfg.secret_key,
first_name=first_name,
last_name=last_name,
rz_username=rz_username,
email_address=email_address,
pw_plaintext=pw_plaintext)
print('user_db: added entry: rz_username: %s, pw: %s...' % (rz_username, pw_plaintext[:3]))
u_count = u_count + 1
user_db.shutdown()
ugid = pwd.getpwnam(ugid_str).pw_uid
os.chown(user_db_path, ugid, ugid)
print('user_db generated: path: %s, user-count: %d' % (user_db_path, u_count))
def open_existing_user_db(user_db_path):
user_db = User_DB(db_path=user_db_path)
user_db.init(mode='w')
return user_db
def role_add(user_db_path, user_email, role):
user_db = open_existing_user_db(user_db_path)
uid, u = user_db.lookup_user__by_email_address(user_email)
user_db.user_add_role(uid, role)
def role_rm(user_db_path, user_email, role):
user_db = open_existing_user_db(user_db_path)
uid, u = user_db.lookup_user__by_email_address(user_email)
user_db.user_rm_role(uid, role)
def list_users(user_db_path):
user_db = open_existing_user_db(user_db_path)
for user in user_db:
print('{}'.format(user))
def add_user(user_db_path, cfg, email, password, first, last, username):
user_db = open_existing_user_db(user_db_path)
add_user_login(user_db=user_db,
salt=cfg.secret_key,
first_name=first,
last_name=last,
rz_username=username,
email_address=email,
pw_plaintext=password)
def main():
commands = ['role-add', 'role-rm', 'list', 'add']
p = argparse.ArgumentParser(description='rz-cli tool. You must provide a command, one of:\n{}'.format(commands))
p.add_argument('--config-dir', help='path to Rhizi config dir', default='res/etc')
p.add_argument('--user-db-path', help='path to user_db (ignore config)')
p.add_argument('--init-user-db', help='init user db', action='store_const', const=True)
p.add_argument('--user-db-init-file', help='user_db db initialization file in \'user,pw\' format')
p.add_argument('--email', help='email of user to operate on')
p.add_argument('--role', help='role to add or remove to/from user, i.e. admin or user')
p.add_argument('--first-name', help="first name for added user")
p.add_argument('--last-name', help='last name for added user')
p.add_argument('--username', help='username for added user')
args, rest = p.parse_known_args()
illegal = False
if len(rest) != 1:
print("only one non argument parameter expected")
illegal = True
elif rest[0] not in commands:
print("command not in {}".format(commands))
illegal = True
elif rest[0] in ['role-add', 'role-rm'] and not args.email:
print("command {} requires an email argument".format(command))
illegal = True
elif rest[0] == 'add' and None in set([args.first_name, args.last_name, args.username]):
print("missing one of first-name, last-name or username for user addition")
illegal = True
if illegal:
p.print_help()
raise SystemExit
command = rest[0]
cfg = init_config(args.config_dir)
user_db_path = args.user_db_path if args.user_db_path is not None else cfg.user_db_path
if args.init_user_db:
init_pw_db(cfg, args.user_db_init_file, user_db_path)
exit(0)
if command == 'role-add':
role_add(user_db_path, args.email, args.role)
elif command == 'role-rm':
role_rm(user_db_path, args.email, args.role)
elif command == 'list':
list_users(user_db_path)
elif command == 'add':
print("please enter password:")
password = getpass()
add_user(user_db_path=user_db_path, cfg=cfg, email=args.email, password=password,
first=args.first_name, last=args.last_name, username=args.username)
if __name__ == '__main__':
main()
| agpl-3.0 |
beppec56/core | uitest/writer_tests/insertTableDialog.py | 1 | 3904 | #
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
from uitest.framework import UITestCase
from libreoffice.uno.propertyvalue import mkPropertyValues
class WriterInsertTableDialog(UITestCase):
def insert_table(self, name, rows, cols):
self.ui_test.create_doc_in_start_center("writer")
self.ui_test.execute_dialog_through_command(".uno:InsertTable")
xDialog = self.xUITest.getTopFocusWindow()
xNameEdit = xDialog.getChild("nameedit")
xNameEdit.executeAction("TYPE", mkPropertyValues({"KEYCODE":"CTRL+A"}))
xNameEdit.executeAction("TYPE", mkPropertyValues({"TEXT": name}))
xColSpin = xDialog.getChild("colspin")
xColSpin.executeAction("TYPE", mkPropertyValues({"KEYCODE":"CTRL+A"}))
xColSpin.executeAction("TYPE", mkPropertyValues({"TEXT": str(cols)}))
xRowSpin = xDialog.getChild("rowspin")
xRowSpin.executeAction("TYPE", mkPropertyValues({"KEYCODE":"CTRL+A"}))
xRowSpin.executeAction("TYPE", mkPropertyValues({"TEXT": str(rows)}))
xOkBtn = xDialog.getChild("ok")
xOkBtn.executeAction("CLICK", tuple())
document = self.ui_test.get_component()
tables = document.getTextTables()
self.assertEqual(tables[0].getName(), name)
self.assertEqual(len(tables[0].getRows()), rows)
self.assertEqual(len(tables[0].getColumns()), cols)
def insertTextIntoCell(self, table, cellName, text ):
tableText = table.getCellByName( cellName )
tableText.setString( text )
def test_tdf80663(self):
self.insert_table("Test1", 2, 2)
document = self.ui_test.get_component()
tables = document.getTextTables()
self.xUITest.executeCommand(".uno:DeleteRows")
self.assertEqual(len(tables[0].getRows()), 1)
self.assertEqual(len(tables[0].getColumns()), 2)
self.xUITest.executeCommand(".uno:Undo")
self.assertEqual(len(tables[0].getRows()), 2)
self.assertEqual(len(tables[0].getColumns()), 2)
self.ui_test.close_doc()
def test_tdf96067(self):
self.insert_table("Test2", 3, 3)
self.xUITest.executeCommand(".uno:SelectTable")
self.xUITest.executeCommand(".uno:InsertRowsBefore")
document = self.ui_test.get_component()
tables = document.getTextTables()
self.assertEqual(len(tables[0].getRows()), 6)
self.assertEqual(len(tables[0].getColumns()), 3)
self.xUITest.executeCommand(".uno:Undo")
self.ui_test.close_doc()
def test_tdf104158(self):
self.insert_table("Test3", 2, 2)
self.ui_test.execute_dialog_through_command(".uno:TableNumberFormatDialog")
xNumberFormatDlg = self.xUITest.getTopFocusWindow()
xOkBtn = xNumberFormatDlg.getChild("ok")
xOkBtn.executeAction("CLICK", tuple())
self.ui_test.close_doc()
def test_tdf87199(self):
self.insert_table("Test4", 2, 1)
document = self.ui_test.get_component()
tables = document.getTextTables()
self.insertTextIntoCell(tables[0], "A1", "test" )
self.insertTextIntoCell(tables[0], "A2", "test" )
cursor = tables[0].getCellByName( "A1" ).createTextCursor()
self.xUITest.executeCommand(".uno:EntireColumn")
self.xUITest.executeCommand(".uno:MergeCells")
tables = document.getTextTables()
self.assertEqual(len(tables[0].getRows()), 1)
self.assertEqual(len(tables[0].getColumns()), 1)
self.xUITest.executeCommand(".uno:Undo")
self.assertEqual(len(tables[0].getRows()), 2)
self.assertEqual(len(tables[0].getColumns()), 1)
self.ui_test.close_doc()
# vim: set shiftwidth=4 softtabstop=4 expandtab:
| gpl-3.0 |
albertomurillo/ansible | lib/ansible/modules/cloud/ovirt/ovirt_vm_facts.py | 40 | 4928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vm_facts
short_description: Retrieve facts about one or more oVirt/RHV virtual machines
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV virtual machines."
notes:
- "This module creates a new top-level C(ovirt_vms) fact, which
contains a list of virtual machines."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search VM X from cluster Y use following pattern:
name=X and cluster=Y"
all_content:
description:
- "If I(true) all the attributes of the virtual machines should be
included in the response."
type: bool
case_sensitive:
description:
- "If I(true) performed search will take case into account."
type: bool
default: true
max:
description:
- "The maximum number of results to return."
next_run:
description:
- "Indicates if the returned result describes the virtual machine as it is currently running or if describes
the virtual machine with the modifications that have already been performed but that will only come into
effect when the virtual machine is restarted. By default the value is set by engine."
type: bool
version_added: "2.8"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all VMs which names start with C(centos) and
# belong to cluster C(west):
- ovirt_vm_facts:
pattern: name=centos* and cluster=west
- debug:
var: ovirt_vms
# Gather info about next run configuration of virtual machine named myvm
- ovirt_vm_facts:
pattern: name=myvm
next_run: true
- debug:
var: ovirt_vms[0]
'''
RETURN = '''
ovirt_vms:
description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
all_content=dict(default=False, type='bool'),
next_run=dict(default=None, type='bool'),
case_sensitive=dict(default=True, type='bool'),
max=dict(default=None, type='int'),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
vms_service = connection.system_service().vms_service()
vms = vms_service.list(
search=module.params['pattern'],
all_content=module.params['all_content'],
case_sensitive=module.params['case_sensitive'],
max=module.params['max'],
)
if module.params['next_run']:
vms = [vms_service.vm_service(vm.id).get(next_run=True) for vm in vms]
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_vms=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in vms
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| gpl-3.0 |
mozilla/inferno | test/lib/test_archiver.py | 4 | 6834 | from nose.tools import eq_
from nose.tools import ok_
from inferno.lib.archiver import Archiver
from test.mock.disco import DDFS
class TestArchiver(object):
def _setup(self, tags=()):
self.archiver = Archiver(
ddfs=DDFS(),
archive_prefix='processed',
archive_mode=True,
max_blobs=100,
tags=tags)
def test_get_archive_name(self):
self._setup()
tag = 'incoming:data:chunk:2012-12-01'
actual = self.archiver._get_archive_name(tag)
eq_(actual, 'processed:data:chunk:2012-12-01')
def test_blob_count(self):
self._setup()
self.archiver.tag_map = self.fake_tag_map
eq_(self.archiver.blob_count, 5)
def test_job_blobs(self):
self._setup()
self.archiver.tag_map = self.fake_tag_map
expected = [
('blob1.a', 'blob1.b', 'blob1.c'),
('blob2.a', 'blob2.b', 'blob2.c'),
('blob3.a', 'blob3.b', 'blob3.c'),
('blob4.a', 'blob4.b', 'blob4.c'),
('blob5.a', 'blob5.b', 'blob5.c')]
eq_(self.archiver.job_blobs, expected)
@property
def fake_tag_map(self):
return {
'tag1': [
('blob1.a', 'blob1.b', 'blob1.c'),
('blob2.a', 'blob2.b', 'blob2.c')],
'tag2': [
('blob3.a', 'blob3.b', 'blob3.c'),
('blob4.a', 'blob4.b', 'blob4.c'),
('blob5.a', 'blob5.b', 'blob5.c')]}
def test_archive(self):
incoming_tag = 'incoming:data:chunk:2011-11-13'
archived_tag = 'processed:data:chunk:2011-11-13'
# no archived tags before the archive call
self._setup(tags=[incoming_tag])
eq_([incoming_tag], self.archiver.ddfs.list(incoming_tag))
eq_([], self.archiver.ddfs.list(archived_tag))
# one archived tag after the archive call
self.archiver.archive()
eq_([incoming_tag], self.archiver.ddfs.list(incoming_tag))
eq_([archived_tag], self.archiver.ddfs.list(archived_tag))
# incoming and archived tags point to the same blobs
expected_blobs = [
('/b13.1', '/b13.2', '/b13.3'),
('/b13.1.a', '/b13.2.a', '/b13.3.a')]
incoming_blobs = self.archiver.ddfs.blobs(incoming_tag)
archived_blobs = self.archiver.ddfs.blobs(archived_tag)
eq_(incoming_blobs, expected_blobs)
eq_(archived_blobs, expected_blobs)
def test_replica_agnostic_archive(self):
incoming_tag = "incoming:froody:chunk:2012-05-17"
processed_tag = "processed:froody:chunk:2012-05-17"
self._setup(tags=[incoming_tag])
self.archiver.archive()
eq_(len(self.archiver.ddfs.blobs(processed_tag)), 1)
class TestBuildTagMap(object):
def _setup(self, archive_mode=True, max_blobs=100, archive_some=False):
ddfs = DDFS()
if archive_some:
blobs = ('/b13.1', '/b13.2', '/b13.3')
ddfs.ddfs['processed:data:chunk:2011-11-13'] = [blobs]
self.archiver = Archiver(
ddfs=ddfs,
archive_prefix='processed',
archive_mode=archive_mode,
max_blobs=max_blobs,
tags=['incoming:data:chunk'])
def test_tag_map(self):
self._setup()
self._assert_full_tag_map()
def test_partially_archived(self):
self._setup(archive_some=True)
self._assert_tag_map_minus_processed()
def test_archive_mode_off(self):
self._setup(archive_some=True, archive_mode=False)
self._assert_full_tag_map()
def test_max_blobs_zero(self):
expected = {'incoming:data:chunk:2011-11-14': []}
self._assert_max_blobs(expected, max_blobs=0)
def test_max_blobs_some_of_one_tag(self):
expected = {
'incoming:data:chunk:2011-11-14': [
('/b14.1', '/b14.2', '/b14.3'),
('/b14.1.a', '/b14.2.a', '/b14.3.a'),
]}
self._assert_max_blobs(expected, max_blobs=2)
def test_max_blobs_exactly_one_tag(self):
expected = {
'incoming:data:chunk:2011-11-14': [
('/b14.1', '/b14.2', '/b14.3'),
('/b14.1.a', '/b14.2.a', '/b14.3.a'),
('/b14.1.b', '/b14.2.b', '/b14.3.b')]}
self._assert_max_blobs(expected, max_blobs=3)
def test_max_blobs_more_than_one_tag(self):
expected = {
'incoming:data:chunk:2011-11-13': [
('/b13.1.a', '/b13.2.a', '/b13.3.a')],
'incoming:data:chunk:2011-11-14': [
('/b14.1', '/b14.2', '/b14.3'),
('/b14.1.a', '/b14.2.a', '/b14.3.a'),
('/b14.1.b', '/b14.2.b', '/b14.3.b')]}
self._assert_max_blobs(expected, max_blobs=4)
def _assert_max_blobs(self, expected, max_blobs):
self._setup(archive_some=True, max_blobs=max_blobs)
self._compare_blobs(self.archiver.tag_map, expected)
eq_(self.archiver.blob_count, max_blobs)
def _assert_full_tag_map(self):
expected = {
'incoming:data:chunk:2011-11-11': [
('/b11.1', '/b11.2', '/b11.3')],
'incoming:data:chunk:2011-11-12': [
('/b12.1', '/b12.2', '/b12.3')],
'incoming:data:chunk:2011-11-13': [
('/b13.1', '/b13.2', '/b13.3'),
('/b13.1.a', '/b13.2.a', '/b13.3.a')],
'incoming:data:chunk:2011-11-14': [
('/b14.1', '/b14.2', '/b14.3'),
('/b14.1.a', '/b14.2.a', '/b14.3.a'),
('/b14.1.b', '/b14.2.b', '/b14.3.b')]}
self._compare_blobs(self.archiver.tag_map, expected)
eq_(self.archiver.blob_count, 7)
def _assert_tag_map_minus_processed(self):
expected = {
'incoming:data:chunk:2011-11-11': [
('/b11.1', '/b11.2', '/b11.3')],
'incoming:data:chunk:2011-11-12': [
('/b12.1', '/b12.2', '/b12.3')],
'incoming:data:chunk:2011-11-13': [
('/b13.1.a', '/b13.2.a', '/b13.3.a')],
'incoming:data:chunk:2011-11-14': [
('/b14.1', '/b14.2', '/b14.3'),
('/b14.1.a', '/b14.2.a', '/b14.3.a'),
('/b14.1.b', '/b14.2.b', '/b14.3.b')]}
self._compare_blobs(self.archiver.tag_map, expected)
eq_(self.archiver.blob_count, 6)
def _compare_blobs(self, tag_map, expected):
eq_(len(expected), len(tag_map))
for tag, blob in expected.iteritems():
ok_(tag in tag_map)
tag_map_blob = tag_map[tag]
eq_(len(tag_map_blob), len(blob))
for replica in blob:
try:
ok_(replica in tag_map_blob)
except Exception as e:
raise e
| mit |
henryfjordan/django | django/contrib/gis/utils/ogrinspect.py | 391 | 9090 | """
This module is for inspecting OGR data sources and generating either
models for GeoDjango and/or mapping dictionaries for use with the
`LayerMapping` utility.
"""
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString,
OFTTime,
)
from django.utils import six
from django.utils.six.moves import zip
def mapping(data_source, geom_name='geom', layer_key=0, multi_geom=False):
"""
Given a DataSource, generates a dictionary that may be used
for invoking the LayerMapping utility.
Keyword Arguments:
`geom_name` => The name of the geometry field to use for the model.
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
"""
if isinstance(data_source, six.string_types):
# Instantiating the DataSource from the string.
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Creating the dictionary.
_mapping = {}
# Generating the field name for each field in the layer.
for field in data_source[layer_key].fields:
mfield = field.lower()
if mfield[-1:] == '_':
mfield += 'field'
_mapping[mfield] = field
gtype = data_source[layer_key].geom_type
if multi_geom:
gtype.to_multi()
_mapping[geom_name] = str(gtype).upper()
return _mapping
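# Hedged usage sketch (not part of the original module): the dictionary
# returned by mapping() above is intended to be handed to LayerMapping.
# The shapefile path and the `City` model below are purely illustrative.
#
#   from django.contrib.gis.utils import LayerMapping, mapping
#   city_mapping = mapping('data/cities.shp', geom_name='geom')
#   lm = LayerMapping(City, 'data/cities.shp', city_mapping)
#   lm.save(verbose=True)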
def ogrinspect(*args, **kwargs):
"""
Given a data source (either a string or a DataSource object) and a string
model name this function will generate a GeoDjango model.
Usage:
>>> from django.contrib.gis.utils import ogrinspect
>>> ogrinspect('/path/to/shapefile.shp','NewModel')
...will print the model definition to stdout
or put this in a python script and use it to redirect the output to a new
model like:
$ python generate_model.py > myapp/models.py
# generate_model.py
from django.contrib.gis.utils import ogrinspect
shp_file = 'data/mapping_hacks/world_borders.shp'
model_name = 'WorldBorders'
print(ogrinspect(shp_file, model_name, multi_geom=True, srid=4326,
geom_name='shapes', blank=True))
Required Arguments
`datasource` => string or DataSource object to file pointer
`model name` => string of name of new model class to create
Optional Keyword Arguments
`geom_name` => For specifying the model name for the Geometry Field.
Otherwise will default to `geom`
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`srid` => The SRID to use for the Geometry Field. If it can be determined,
the SRID of the datasource is used.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
`name_field` => String - specifies a field name to return for the
`__unicode__`/`__str__` function (which will be generated if specified).
`imports` => Boolean (default: True) - set to False to omit the
`from django.contrib.gis.db import models` code from the
autogenerated models thus avoiding duplicated imports when building
more than one model by batching ogrinspect()
`decimal` => Boolean or sequence (default: False). When set to True
all generated model fields corresponding to the `OFTReal` type will
be `DecimalField` instead of `FloatField`. A sequence of specific
field names to generate as `DecimalField` may also be used.
`blank` => Boolean or sequence (default: False). When set to True all
generated model fields will have `blank=True`. If the user wants to
give specific fields to have blank, then a list/tuple of OGR field
names may be used.
`null` => Boolean (default: False) - When set to True all generated
model fields will have `null=True`. If the user wants to give
specific fields to have null, then a list/tuple of OGR field
names may be used.
Note: This routine calls the _ogrinspect() helper to do the heavy lifting.
"""
return '\n'.join(s for s in _ogrinspect(*args, **kwargs))
def _ogrinspect(data_source, model_name, geom_name='geom', layer_key=0, srid=None,
multi_geom=False, name_field=None, imports=True,
decimal=False, blank=False, null=False):
"""
Helper routine for `ogrinspect` that generates GeoDjango models corresponding
to the given data source. See the `ogrinspect` docstring for more details.
"""
# Getting the DataSource
if isinstance(data_source, six.string_types):
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Getting the layer corresponding to the layer key and getting
# a string listing of all OGR fields in the Layer.
layer = data_source[layer_key]
ogr_fields = layer.fields
# Creating lists from the `null`, `blank`, and `decimal`
# keyword arguments.
def process_kwarg(kwarg):
if isinstance(kwarg, (list, tuple)):
return [s.lower() for s in kwarg]
elif kwarg:
return [s.lower() for s in ogr_fields]
else:
return []
null_fields = process_kwarg(null)
blank_fields = process_kwarg(blank)
decimal_fields = process_kwarg(decimal)
# Gets the `null` and `blank` keywords for the given field name.
def get_kwargs_str(field_name):
kwlist = []
if field_name.lower() in null_fields:
kwlist.append('null=True')
if field_name.lower() in blank_fields:
kwlist.append('blank=True')
if kwlist:
return ', ' + ', '.join(kwlist)
else:
return ''
# For those wishing to disable the imports.
if imports:
yield '# This is an auto-generated Django model module created by ogrinspect.'
yield 'from django.contrib.gis.db import models'
yield ''
yield 'class %s(models.Model):' % model_name
for field_name, width, precision, field_type in zip(
ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types):
# The model field name.
mfield = field_name.lower()
if mfield[-1:] == '_':
mfield += 'field'
# Getting the keyword args string.
kwargs_str = get_kwargs_str(field_name)
if field_type is OFTReal:
# By default OFTReals are mapped to `FloatField`, however, they
# may also be mapped to `DecimalField` if specified in the
# `decimal` keyword.
if field_name.lower() in decimal_fields:
yield ' %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)' % (
mfield, width, precision, kwargs_str
)
else:
yield ' %s = models.FloatField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTInteger:
yield ' %s = models.IntegerField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTInteger64:
yield ' %s = models.BigIntegerField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTString:
yield ' %s = models.CharField(max_length=%s%s)' % (mfield, width, kwargs_str)
elif field_type is OFTDate:
yield ' %s = models.DateField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTDateTime:
yield ' %s = models.DateTimeField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTTime:
yield ' %s = models.TimeField(%s)' % (mfield, kwargs_str[2:])
else:
raise TypeError('Unknown field type %s in %s' % (field_type, mfield))
# TODO: Autodetection of multigeometry types (see #7218).
gtype = layer.geom_type
if multi_geom:
gtype.to_multi()
geom_field = gtype.django
# Setting up the SRID keyword string.
if srid is None:
if layer.srs is None:
srid_str = 'srid=-1'
else:
srid = layer.srs.srid
if srid is None:
srid_str = 'srid=-1'
elif srid == 4326:
# WGS84 is already the default.
srid_str = ''
else:
srid_str = 'srid=%s' % srid
else:
srid_str = 'srid=%s' % srid
yield ' %s = models.%s(%s)' % (geom_name, geom_field, srid_str)
if name_field:
yield ''
yield ' def __%s__(self): return self.%s' % (
'str' if six.PY3 else 'unicode', name_field)
| bsd-3-clause |
savoca/ifc6540 | scripts/build-all.py | 26 | 14705 | #! /usr/bin/env python
# Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
from collections import namedtuple
import errno
import glob
from optparse import OptionParser
import os
import re
import shutil
import subprocess
import sys
import threading
import Queue
version = 'build-all.py, version 1.99'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
all_options = {}
compile64 = os.environ.get('CROSS_COMPILE64')
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
if not os.environ.get('CROSS_COMPILE'):
fail("CROSS_COMPILE must be set in the environment")
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def build_threads():
"""Determine the number of build threads requested by the user"""
if all_options.load_average:
return all_options.load_average
return all_options.jobs or 1
failed_targets = []
BuildResult = namedtuple('BuildResult', ['status', 'messages'])
class BuildSequence(namedtuple('BuildSequence', ['log_name', 'short_name', 'steps'])):
def set_width(self, width):
self.width = width
def __enter__(self):
self.log = open(self.log_name, 'w')
def __exit__(self, type, value, traceback):
self.log.close()
def run(self):
self.status = None
messages = ["Building: " + self.short_name]
def printer(line):
text = "[%-*s] %s" % (self.width, self.short_name, line)
messages.append(text)
self.log.write(text)
self.log.write('\n')
for step in self.steps:
st = step.run(printer)
if st:
self.status = BuildResult(self.short_name, messages)
break
if not self.status:
self.status = BuildResult(None, messages)
class BuildTracker:
"""Manages all of the steps necessary to perform a build. The
build consists of one or more sequences of steps. The different
sequences can be processed independently, while the steps within a
sequence must be done in order."""
def __init__(self):
self.sequence = []
self.lock = threading.Lock()
def add_sequence(self, log_name, short_name, steps):
self.sequence.append(BuildSequence(log_name, short_name, steps))
def longest_name(self):
longest = 0
for seq in self.sequence:
longest = max(longest, len(seq.short_name))
return longest
def __repr__(self):
return "BuildTracker(%s)" % self.sequence
def run_child(self, seq):
seq.set_width(self.longest)
tok = self.build_tokens.get()
with self.lock:
print "Building:", seq.short_name
with seq:
seq.run()
self.results.put(seq.status)
self.build_tokens.put(tok)
def run(self):
self.longest = self.longest_name()
self.results = Queue.Queue()
children = []
errors = []
self.build_tokens = Queue.Queue()
nthreads = build_threads()
print "Building with", nthreads, "threads"
for i in range(nthreads):
self.build_tokens.put(True)
for seq in self.sequence:
child = threading.Thread(target=self.run_child, args=[seq])
children.append(child)
child.start()
for child in children:
stats = self.results.get()
if all_options.verbose:
with self.lock:
for line in stats.messages:
print line
sys.stdout.flush()
if stats.status:
errors.append(stats.status)
for child in children:
child.join()
if errors:
fail("\n ".join(["Failed targets:"] + errors))
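# Hedged usage sketch (not in the original script): a BuildTracker is driven
# by adding one sequence of steps per target and then running them all. The
# log path, target name and steps below are made up for illustration, and
# this assumes all_options has already been populated by main()'s option
# parsing (build_threads() reads it).
#
#   tracker = BuildTracker()
#   tracker.add_sequence('../all-kernels/log-msm_defconfig.log',
#                        'msm_defconfig',
#                        [MkdirStep('../all-kernels/msm_defconfig'),
#                         PrintStep('configured msm_defconfig')])
#   tracker.run()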
class PrintStep:
"""A step that just prints a message"""
def __init__(self, message):
self.message = message
def run(self, outp):
outp(self.message)
class MkdirStep:
"""A step that makes a directory"""
def __init__(self, direc):
self.direc = direc
def run(self, outp):
outp("mkdir %s" % self.direc)
os.mkdir(self.direc)
class RmtreeStep:
def __init__(self, direc):
self.direc = direc
def run(self, outp):
outp("rmtree %s" % self.direc)
shutil.rmtree(self.direc, ignore_errors=True)
class CopyfileStep:
def __init__(self, src, dest):
self.src = src
self.dest = dest
def run(self, outp):
outp("cp %s %s" % (self.src, self.dest))
shutil.copyfile(self.src, self.dest)
class ExecStep:
def __init__(self, cmd, **kwargs):
self.cmd = cmd
self.kwargs = kwargs
def run(self, outp):
outp("exec: %s" % (" ".join(self.cmd),))
with open('/dev/null', 'r') as devnull:
proc = subprocess.Popen(self.cmd, stdin=devnull,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**self.kwargs)
stdout = proc.stdout
while True:
line = stdout.readline()
if not line:
break
line = line.rstrip('\n')
outp(line)
result = proc.wait()
if result != 0:
return ('error', result)
else:
return None
class Builder():
def __init__(self, name, defconfig):
self.name = name
self.defconfig = defconfig
self.confname = self.defconfig.split('/')[-1]
# Determine if this is a 64-bit target based on the location
# of the defconfig.
self.make_env = os.environ.copy()
if "/arm64/" in defconfig:
if compile64:
self.make_env['CROSS_COMPILE'] = compile64
else:
fail("Attempting to build 64-bit, without setting CROSS_COMPILE64")
self.make_env['ARCH'] = 'arm64'
else:
self.make_env['ARCH'] = 'arm'
self.make_env['KCONFIG_NOTIMESTAMP'] = 'true'
self.log_name = "%s/log-%s.log" % (build_dir, self.name)
def build(self):
steps = []
dest_dir = os.path.join(build_dir, self.name)
log_name = "%s/log-%s.log" % (build_dir, self.name)
steps.append(PrintStep('Building %s in %s log %s' %
(self.name, dest_dir, log_name)))
if not os.path.isdir(dest_dir):
steps.append(MkdirStep(dest_dir))
defconfig = self.defconfig
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
steps.append(RmtreeStep(os.path.join(dest_dir, staging_dir)))
steps.append(ExecStep(['make', 'O=%s' % dest_dir,
self.confname], env=self.make_env))
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of
# previous build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
steps.append(ExecStep(cmd_line + [t], env=self.make_env))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
steps.append(ExecStep(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=self.make_env))
steps.append(CopyfileStep(savedefconfig, defconfig))
return steps
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
with open(file, 'a') as defconfig:
defconfig.write(str + '\n')
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = []
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'mdm*_defconfig',
r'mpq*_defconfig',
)
arch64_pats = (
r'msm_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
name = os.path.basename(n)[:-10]
names.append(Builder(name, n))
if 'CROSS_COMPILE64' in os.environ:
for p in arch64_pats:
for n in glob.glob('arch/arm64/configs/' + p):
name = os.path.basename(n)[:-10] + "-64"
names.append(Builder(name, n))
return names
def build_many(targets):
print "Building %d target(s)" % len(targets)
# If we are requesting multiple builds, divide down the job number
# to construct the make_command, giving it a floor of 2, so there
# is still some parallelism.
if all_options.jobs and all_options.jobs > 1:
j = max(all_options.jobs / len(targets), 2)
make_command.append("-j" + str(j))
tracker = BuildTracker()
for target in targets:
if all_options.updateconfigs:
update_config(target.defconfig, all_options.updateconfigs)
steps = target.build()
tracker.add_sequence(target.log_name, target.name, steps)
tracker.run()
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs:
print " %s" % target.name
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if args == ['all']:
build_many(configs)
elif args == ['perf']:
targets = []
for t in configs:
if "perf" in t.name:
targets.append(t)
build_many(targets)
elif args == ['noperf']:
targets = []
for t in configs:
if "perf" not in t.name:
targets.append(t)
build_many(targets)
elif len(args) > 0:
all_configs = {}
for t in configs:
all_configs[t.name] = t
targets = []
for t in args:
if t not in all_configs:
parser.error("Target '%s' not one of %s" % (t, all_configs.keys()))
targets.append(all_configs[t])
build_many(targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
jzmnd/Stino | stino/pyarduino/arduino_target_platform.py | 14 | 1331 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# 1. Copyright
# 2. Lisence
# 3. Author
"""
Documents
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from . import base
class TargetPlatform(object):
def __init__(self, root_dirs):
self.target_platform = None
self.root_dirs = root_dirs
self.settings = base.settings.get_arduino_settings()
self.update()
def update(self):
target_board_id = self.settings.get('target_board_id', '')
if target_board_id:
ids = target_board_id.split('.')[:-1]
target_platform_id = '.'.join(ids)
for root_dir in self.root_dirs:
for package in root_dir.get_packages():
for platform in package.get_platforms():
if platform.get_id() == target_platform_id:
self.target_platform = platform
break
if self.target_platform:
break
if self.target_platform:
break
def get_target_platform(self):
return self.target_platform
def get_target_platform_file(self):
return self.target_platform_file
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/django/contrib/gis/tests/test_spatialrefsys.py | 88 | 6775 | from django.db import connection
from django.contrib.gis.gdal import GDAL_VERSION
from django.contrib.gis.tests.utils import no_mysql, oracle, postgis, spatialite
from django.utils import unittest
test_srs = ({'srid' : 4326,
'auth_name' : ('EPSG', True),
'auth_srid' : 4326,
'srtext' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'srtext14' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'proj4' : '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ',
'spheroid' : 'WGS 84', 'name' : 'WGS 84',
'geographic' : True, 'projected' : False, 'spatialite' : True,
'ellipsoid' : (6378137.0, 6356752.3, 298.257223563), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 1, 9),
},
{'srid' : 32140,
'auth_name' : ('EPSG', False),
'auth_srid' : 32140,
'srtext' : 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',
'srtext14': 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],AUTHORITY["EPSG","32140"],AXIS["X",EAST],AXIS["Y",NORTH]]',
'proj4' : '+proj=lcc +lat_1=30.28333333333333 +lat_2=28.38333333333333 +lat_0=27.83333333333333 +lon_0=-99 +x_0=600000 +y_0=4000000 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ',
'spheroid' : 'GRS 1980', 'name' : 'NAD83 / Texas South Central',
'geographic' : False, 'projected' : True, 'spatialite' : False,
'ellipsoid' : (6378137.0, 6356752.31414, 298.257222101), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 5, 10),
},
)
if oracle:
from django.contrib.gis.db.backends.oracle.models import SpatialRefSys
elif postgis:
from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
elif spatialite:
from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
class SpatialRefSysTest(unittest.TestCase):
@no_mysql
def test01_retrieve(self):
"Testing retrieval of SpatialRefSys model objects."
for sd in test_srs:
srs = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(sd['srid'], srs.srid)
# Some of the authority names are borked on Oracle, e.g., SRID=32140.
# also, Oracle Spatial seems to add extraneous info to fields, hence
# the testing with the 'startswith' flag.
auth_name, oracle_flag = sd['auth_name']
if postgis or (oracle and oracle_flag):
self.assertEqual(True, srs.auth_name.startswith(auth_name))
self.assertEqual(sd['auth_srid'], srs.auth_srid)
# No proj.4 and different srtext on oracle backends :(
if postgis:
if connection.ops.spatial_version >= (1, 4, 0):
srtext = sd['srtext14']
else:
srtext = sd['srtext']
self.assertEqual(srtext, srs.wkt)
self.assertEqual(sd['proj4'], srs.proj4text)
@no_mysql
def test02_osr(self):
"Testing getting OSR objects from SpatialRefSys model objects."
for sd in test_srs:
sr = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(True, sr.spheroid.startswith(sd['spheroid']))
self.assertEqual(sd['geographic'], sr.geographic)
self.assertEqual(sd['projected'], sr.projected)
if not (spatialite and not sd['spatialite']):
# Can't get 'NAD83 / Texas South Central' from PROJ.4 string
# on SpatiaLite
self.assertEqual(True, sr.name.startswith(sd['name']))
# Testing the SpatialReference object directly.
if postgis or spatialite:
srs = sr.srs
if GDAL_VERSION <= (1, 8):
self.assertEqual(sd['proj4'], srs.proj4)
# No `srtext` field in the `spatial_ref_sys` table in SpatiaLite
if not spatialite:
if connection.ops.spatial_version >= (1, 4, 0):
srtext = sd['srtext14']
else:
srtext = sd['srtext']
self.assertEqual(srtext, srs.wkt)
@no_mysql
def test03_ellipsoid(self):
"Testing the ellipsoid property."
for sd in test_srs:
# Getting the ellipsoid and precision parameters.
ellps1 = sd['ellipsoid']
prec = sd['eprec']
# Getting our spatial reference and its ellipsoid
srs = SpatialRefSys.objects.get(srid=sd['srid'])
ellps2 = srs.ellipsoid
for i in range(3):
param1 = ellps1[i]
param2 = ellps2[i]
self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(SpatialRefSysTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| agpl-3.0 |
happy5214/pywikibot-core | tests/data_ingestion_tests.py | 6 | 4044 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Unit tests for data_ingestion.py script."""
#
# (C) Pywikibot team, 2012-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from scripts import data_ingestion
from tests import join_data_path, join_images_path
from tests.aspects import unittest, TestCase, ScriptMainTestCase
class TestPhoto(TestCase):
"""Test Photo class."""
sites = {
'wm-upload': {
'hostname': 'upload.wikimedia.org',
},
'commons': {
'family': 'commons',
'code': 'commons',
},
}
def setUp(self):
"""Set up unit test."""
super(TestPhoto, self).setUp()
self.obj = data_ingestion.Photo(
URL='http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png',
metadata={'description.en': '"Sounds" icon',
'source': 'http://commons.wikimedia.org/wiki/File:Sound-icon.svg',
'author': 'KDE artists | Silstor',
'license': 'LGPL',
'set': 'Crystal SVG icon set',
'name': 'Sound icon'},
site=self.get_site('commons'))
def test_downloadPhoto(self):
"""Test download from http://upload.wikimedia.org/."""
with open(join_images_path('MP_sounds.png'), 'rb') as f:
self.assertEqual(f.read(), self.obj.downloadPhoto().read())
def test_findDuplicateImages(self):
"""Test finding duplicates on Wikimedia Commons."""
duplicates = self.obj.findDuplicateImages()
self.assertIn('MP sounds.png', [dup.replace("_", " ") for dup in duplicates])
def test_getTitle(self):
"""Test getTitle()."""
self.assertEqual(self.obj.getTitle('%(name)s - %(set)s.%(_ext)s'),
'Sound icon - Crystal SVG icon set.png')
def test_getDescription(self):
"""Test getDescription()."""
self.assertEqual(self.obj.getDescription('CrystalTemplate'),
str("""{{CrystalTemplate
|author=KDE artists {{!}} Silstor
|description.en="Sounds" icon
|license=LGPL
|name=Sound icon
|set=Crystal SVG icon set
|source=http://commons.wikimedia.org/wiki/File:Sound-icon.svg
}}"""))
class TestCSVReader(TestCase):
"""Test CSVReader class."""
family = 'commons'
code = 'commons'
def setUp(self):
"""Set up unit test."""
super(TestCSVReader, self).setUp()
with open(join_data_path('csv_ingestion.csv')) as fileobj:
self.iterator = data_ingestion.CSVReader(fileobj, 'url',
site=self.get_site())
self.obj = next(self.iterator)
def test_PhotoURL(self):
"""Test PhotoURL()."""
self.assertEqual(self.obj.URL,
'http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png')
def test_getTitle(self):
"""Test getTitle()."""
self.assertEqual(self.obj.getTitle('%(name)s - %(set)s.%(_ext)s'),
'Sound icon - Crystal SVG icon set.png')
def test_getDescription(self):
"""Test getDescription()."""
self.assertEqual(self.obj.getDescription('CrystalTemplate'),
str("""{{CrystalTemplate
|author=KDE artists {{!}} Silstor
|description.en="Sounds" icon
|license=LGPL
|name=Sound icon
|set=Crystal SVG icon set
|source=http://commons.wikimedia.org/wiki/File:Sound-icon.svg
|url=http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png
}}"""))
class TestDataIngestionBot(ScriptMainTestCase):
"""Test TestDataIngestionBot class."""
family = 'test'
code = 'test'
def test_existing_file(self):
"""Test uploading a file that already exists."""
data_ingestion.main(
'-csvdir:tests/data',
'-page:User:John_Vandenberg/data_ingestion_test_template')
if __name__ == '__main__': # pragma: no cover
unittest.main()
| mit |
pshen/ansible | lib/ansible/module_utils/facts/system/service_mgr.py | 59 | 5946 | # Collect facts related to system service manager and init.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import re
from ansible.module_utils._text import to_native
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
class ServiceMgrFactCollector(BaseFactCollector):
name = 'service_mgr'
_fact_ids = set()
@staticmethod
def is_systemd_managed(module):
# tools must be installed
if module.get_bin_path('systemctl'):
# this should show if systemd is the boot init system; if checking init failed, mark as systemd
# these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
if os.path.exists(canary):
return True
return False
def collect(self, module=None, collected_facts=None):
facts_dict = {}
if not module:
return facts_dict
collected_facts = collected_facts or {}
service_mgr_name = None
# TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, etc
# also other OSs other than linux might need to check across several possible candidates
# Mapping of proc_1 values to more useful names
proc_1_map = {
'procd': 'openwrt_init',
'runit-init': 'runit',
'svscan': 'svc',
'openrc-init': 'openrc',
}
# try various forms of querying pid 1
proc_1 = get_file_content('/proc/1/comm')
if proc_1 is None:
# FIXME: return code isn't checked
# FIXME: if stdout is empty string, odd things
# FIXME: other code seems to think we could get proc_1 == None past this point
rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
# If the output of the command starts with what looks like a PID, then the 'ps' command
# probably didn't work the way we wanted, probably because it's busybox
if re.match(r' *[0-9]+ ', proc_1):
proc_1 = None
# The ps command above may return "COMMAND" if the user cannot read /proc, e.g. with grsecurity
if proc_1 == "COMMAND\n":
proc_1 = None
# FIXME: an empty string proc_1 stays an empty string
if proc_1 is not None:
proc_1 = os.path.basename(proc_1)
proc_1 = to_native(proc_1)
proc_1 = proc_1.strip()
if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
# many systems return init, so this cannot be trusted; if it ends in 'sh' it probably is a shell in a container
proc_1 = None
# if not init/None it should be an identifiable or custom init, so we are done!
if proc_1 is not None:
# Lookup proc_1 value in map and use proc_1 value itself if no match
# FIXME: empty string still falls through
service_mgr_name = proc_1_map.get(proc_1, proc_1)
# FIXME: replace with a system->service_mgr_name map?
# start with the easy ones
elif collected_facts.get('distribution', None) == 'MacOSX':
# FIXME: find way to query executable, version matching is not ideal
if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
service_mgr_name = 'launchd'
else:
service_mgr_name = 'systemstarter'
elif 'BSD' in collected_facts.get('system', '') or collected_facts.get('system') in ['Bitrig', 'DragonFly']:
# FIXME: we might want to break out to individual BSDs or 'rc'
service_mgr_name = 'bsdinit'
elif collected_facts.get('system') == 'AIX':
service_mgr_name = 'src'
elif collected_facts.get('system') == 'SunOS':
service_mgr_name = 'smf'
elif collected_facts.get('distribution') == 'OpenWrt':
service_mgr_name = 'openwrt_init'
elif collected_facts.get('system') == 'Linux':
# FIXME: mv is_systemd_managed
if self.is_systemd_managed(module=module):
service_mgr_name = 'systemd'
elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
service_mgr_name = 'upstart'
elif os.path.exists('/sbin/openrc'):
service_mgr_name = 'openrc'
elif os.path.exists('/etc/init.d/'):
service_mgr_name = 'sysvinit'
if not service_mgr_name:
# if we cannot detect, fallback to generic 'service'
service_mgr_name = 'service'
facts_dict['service_mgr'] = service_mgr_name
return facts_dict
| gpl-3.0 |
fibbo/DIRAC | Core/Workflow/Step.py | 7 | 17136 | ''' Step. Steps are included in the workflows, and include modules
'''
import os
import time
import types
import traceback
import sys
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Workflow.Parameter import Parameter, AttributeCollection, ParameterCollection, indent
from DIRAC.Core.Workflow.Module import InstancesPool, DefinitionsPool, ModuleInstance
class StepDefinition( AttributeCollection ):
def __init__( self, step_type = None, obj = None, parent = None ):
AttributeCollection.__init__( self )
self.module_instances = None
# this object can be shared with the workflow
# to if its =None and workflow!=None we have to
# pass everything above
self.module_definitions = None
self.parent = parent
# sort out Parameters and class attributes
if ( obj == None ) or isinstance( obj, ParameterCollection ):
self.setType( 'notgiven' )
self.setDescrShort( '' )
self.setDescription( '' )
self.setOrigin( '' )
self.setVersion( 0.0 )
self.parameters = ParameterCollection( obj ) # creating copy
self.module_instances = InstancesPool( self )
self.module_definitions = DefinitionsPool( self )
elif isinstance( obj, StepDefinition ):
self.setType( obj.getType() )
self.setDescrShort( obj.getDescrShort() )
self.setDescription( obj.getDescription() )
self.setOrigin( obj.getOrigin() )
self.setVersion( obj.getVersion() )
# copy instances and definitions
self.parameters = ParameterCollection( self, obj.parameters )
self.module_instances = InstancesPool( self, obj.module_instances )
if obj.module_definitions != None:
self.module_definitions = DefinitionsPool( self, obj.module_definitions )
else:
raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( obj ) ) )
if step_type :
self.setType( step_type )
def __str__( self ):
ret = str( type( self ) ) + ':\n' + AttributeCollection.__str__( self ) + self.parameters.__str__()
if self.module_definitions != None:
ret = ret + str( self.module_definitions )
else:
ret = ret + 'Module definitions shared in Workflow\n'
ret = ret + str( self.module_instances )
return ret
def toXML( self ):
ret = '<StepDefinition>\n'
ret = ret + AttributeCollection.toXML( self )
ret = ret + self.parameters.toXML()
if self.module_definitions != None:
ret = ret + self.module_definitions.toXML()
ret = ret + self.module_instances.toXML()
ret = ret + '</StepDefinition>\n'
return ret
def toXMLFile( self, outFile ):
if os.path.exists( outFile ):
os.remove( outFile )
xmlfile = open( outFile, 'w' )
xmlfile.write( self.toXML() )
xmlfile.close()
def addModule( self, module ):
# KGG We need to add code to update existing modules
if self.module_definitions == None:
self.parent.module_definitions.append( module )
else:
self.module_definitions.append( module )
return module
def createModuleInstance( self, module_type, name ):
''' Creates module instance of type 'type' with the name 'name'
'''
if self.module_definitions[module_type]:
mi = ModuleInstance( name, self.module_definitions[module_type] )
self.module_instances.append( mi )
return mi
else:
raise KeyError( 'Can not find ModuleDefinition ' + module_type + ' to create ModuleInstrance ' + name )
def removeModuleInstance( self, name ):
''' Remove module instance specified by its name
'''
self.module_instances.delete( name )
def compare( self, s ):
''' Custom Step comparison operation
'''
ret = AttributeCollection.compare( self, s ) and self.module_instances.compare( s )
if self.module_definitions.getOwner() == self:
ret = ret and self.module_definitions.compare( s )
return ret
def updateParents( self, parent ):
'''
'''
#AttributeCollection.updateParents( self, parent )
self.module_instances.updateParents( self )
if( self.module_definitions != None ):
self.module_definitions.updateParents( self )
def createCode( self ):
''' Create Step code
'''
str_ = 'class ' + self.getType() + ':\n'
str_ = str_ + indent( 1 ) + 'def execute(self):\n'
str_ = str_ + self.module_instances.createCode()
str_ = str_ + indent( 2 ) + '# output assignment\n'
for v in self.parameters:
if v.isOutput():
str_ = str_ + v.createParameterCode( 2, 'self' )
str_ += '\n'
return str_
class StepInstance( AttributeCollection ):
def __init__( self, name, obj = None, parent = None ):
AttributeCollection.__init__( self )
self.parent = None
if obj == None:
self.parameters = ParameterCollection()
elif isinstance( obj, StepInstance ) or isinstance( obj, StepDefinition ):
if name == None:
self.setName( obj.getName() )
else:
self.setName( name )
self.setType( obj.getType() )
self.setDescrShort( obj.getDescrShort() )
self.parameters = ParameterCollection( obj.parameters )
elif ( obj == None ) or isinstance( obj, ParameterCollection ):
# set attributes
self.setName( name )
self.setType( "" )
self.setDescrShort( "" )
self.parameters = ParameterCollection( obj )
elif obj != None:
raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( obj ) ) )
self.step_commons = {}
self.stepStatus = S_OK()
def resolveGlobalVars( self, step_definitions, wf_parameters ):
''' Resolve parameter values defined in the @{<variable>} form
'''
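# Illustrative note (not in the original source): a parameter defined with a
# value such as "@{PRODUCTION_ID}_@{JOB_ID}.log" would have the @{...}
# references substituted from the enclosing workflow/step parameters here.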
self.parameters.resolveGlobalVars( wf_parameters )
module_instance_number = 0
for inst in step_definitions[self.getType()].module_instances:
module_instance_number = module_instance_number + 1
if not inst.parameters.find( "MODULE_NUMBER" ):
inst.parameters.append( Parameter( "MODULE_NUMBER", "%s" % module_instance_number, "string", "", "",
True, False, "ModuleInstance number within the Step" ) )
if not inst.parameters.find( "MODULE_INSTANCE_NAME" ):
inst.parameters.append( Parameter( "MODULE_INSTANCE_NAME", inst.getName(), "string", "", "",
True, False, "Name of the ModuleInstance within the Step" ) )
if not inst.parameters.find( "MODULE_DEFINITION_NAME" ):
inst.parameters.append( Parameter( "MODULE_DEFINITION_NAME", inst.getType(), "string", "", "",
True, False, "Type of the ModuleInstance within the Step" ) )
if not inst.parameters.find( "JOB_ID" ):
inst.parameters.append( Parameter( "JOB_ID", "", "string", "self", "JOB_ID",
True, False, "Job ID within a Production as a string" ) )
if not inst.parameters.find( "PRODUCTION_ID" ):
inst.parameters.append( Parameter( "PRODUCTION_ID", "", "string", "self", "PRODUCTION_ID",
True, False, "Production ID as a string" ) )
if not inst.parameters.find( "STEP_NUMBER" ):
inst.parameters.append( Parameter( "STEP_NUMBER", "", "string", "self", "STEP_NUMBER",
True, False, "Step instance number within the Workflow" ) )
if not inst.parameters.find( "STEP_ID" ):
inst.parameters.append( Parameter( "STEP_ID", "", "string", "self", "STEP_NUMBER",
True, False, "Step ID within the Workflow" ) )
inst.resolveGlobalVars( wf_parameters, self.parameters )
def createCode( self, ind = 2 ):
''' Create the Step code
'''
str_ = indent( ind ) + self.getName() + ' = ' + self.getType() + '()\n'
str_ = str_ + self.parameters.createParametersCode( ind, self.getName() )
str_ = str_ + indent( ind ) + self.getName() + '.execute()\n\n'
return str_
def __str__( self ):
''' Step string representation
'''
return str( type( self ) ) + ':\n' + AttributeCollection.__str__( self ) + self.parameters.__str__()
def toXML( self ):
''' Generate the Step XML representation
'''
ret = '<StepInstance>\n'
ret = ret + AttributeCollection.toXML( self )
ret = ret + self.parameters.toXML()
ret = ret + '</StepInstance>\n'
return ret
def setWorkflowCommons( self, wf ):
''' Add reference to the collection of the common tools
'''
self.workflow_commons = wf
def execute( self, step_exec_attr, definitions ):
''' Step execution method. step_exec_attr is array to hold parameters belong to this Step,
filled above in the workflow
'''
print 'Executing StepInstance', self.getName(), 'of type', self.getType(), definitions.keys()
# Report the Application state if the corresponding tool is supplied
if self.workflow_commons.has_key( 'JobReport' ):
if self.parent.workflowStatus['OK']:
result = self.workflow_commons['JobReport'].setApplicationStatus( 'Executing ' + self.getName() )
# Prepare Step statistics evaluation
self.step_commons['StartTime'] = time.time()
self.step_commons['StartStats'] = os.times()
step_def = definitions[self.getType()]
step_exec_modules = {}
error_message = ''
for mod_inst in step_def.module_instances:
mod_inst_name = mod_inst.getName()
mod_inst_type = mod_inst.getType()
# print "StepInstance creating module instance ", mod_inst_name, " of type", mod_inst.getType()
step_exec_modules[mod_inst_name] = \
step_def.parent.module_definitions[mod_inst_type].main_class_obj() # creating instance
# Resolve all the linked parameter values
for parameter in mod_inst.parameters:
if parameter.preExecute():
# print '>>>> Input', parameter
if parameter.isLinked():
# print ">>>> ModuleInstance", mod_inst_name + '.' + parameter.getName(), '=', parameter.getLinkedModule() + '.' + parameter.getLinkedParameter()
if parameter.getLinkedModule() == 'self':
# tale value form the step_dict
setattr( step_exec_modules[mod_inst_name],
parameter.getName(),
step_exec_attr[parameter.getLinkedParameter()] )
else:
setattr( step_exec_modules[mod_inst_name],
parameter.getName(),
getattr( step_exec_modules[parameter.getLinkedModule()],
parameter.getLinkedParameter() ) )
else:
# print ">>>> ModuleInstance", mod_inst_name + '.' + parameter.getName(), '=', parameter.getValue()
setattr( step_exec_modules[mod_inst_name], parameter.getName(), parameter.getValue() )
# print 'Step Input Parameter:', parameter.getName(), getattr( step_exec_modules[mod_inst_name], parameter.getName() )
# Set reference to the workflow and step common tools
setattr( step_exec_modules[mod_inst_name], 'workflow_commons', self.parent.workflow_commons )
setattr( step_exec_modules[mod_inst_name], 'step_commons', self.step_commons )
setattr( step_exec_modules[mod_inst_name], 'stepStatus', self.stepStatus )
setattr( step_exec_modules[mod_inst_name], 'workflowStatus', self.parent.workflowStatus )
try:
result = step_exec_modules[mod_inst_name].execute()
if not result['OK']:
if self.stepStatus['OK']:
error_message = result['Message']
if self.workflow_commons.has_key( 'JobReport' ):
if self.parent.workflowStatus['OK']:
resultStatus = self.workflow_commons['JobReport'].setApplicationStatus( error_message )
self.stepStatus = S_ERROR( result['Message'] )
else:
for parameter in mod_inst.parameters:
if parameter.isOutput():
# print '<<<< Output', parameter
if parameter.isLinked():
# print "ModuleInstance self ." + parameter.getName(), '=', parameter.getLinkedModule() + '.' + parameter.getLinkedParameter()
if parameter.getLinkedModule() == 'self':
# this is not supposed to happen
print "Warning! Module OUTPUT attribute", parameter.getName(),
print "refer to the attribute of the same module", parameter.getLinkedParameter(), '=', getattr( step_exec_modules[mod_inst_name], parameter.getName() )
step_exec_attr[parameter.getName()] = getattr( step_exec_modules[mod_inst_name], parameter.getLinkedParameter(), parameter.getValue() )
# print " OUT", parameter.getLinkedParameter(), '=', getattr( step_exec_modules[mod_inst_name], parameter.getName(), parameter.getValue() )
else:
# print 'Output step_exec_attr', st_parameter.getName(), step_exec_modules[st_parameter.getLinkedModule()], parameter.getLinkedParameter()
step_exec_attr[parameter.getName()] = \
getattr( step_exec_modules[parameter.getLinkedModule()],
parameter.getLinkedParameter() )
else:
# This also does not make sense - we can give a warning
print "Warning! Module OUTPUT attribute ", parameter.getName(),
print "assigned constant", parameter.getValue()
# print "StepInstance self." + parameter.getName(), '=', parameter.getValue()
step_exec_attr[parameter.getName()] = parameter.getValue()
# print 'Module Output Parameter:', parameter.getName(), step_exec_attr[parameter.getName()]
# Get output values to the step_commons dictionary
for key in result.keys():
if key != "OK":
if key != "Value":
self.step_commons[key] = result[key]
elif type( result['Value'] ) == types.DictType:
for vkey in result['Value'].keys():
self.step_commons[vkey] = result['Value'][vkey]
except Exception, x:
print "Exception while module execution"
print "Module", mod_inst_name, mod_inst.getType()
print str( x )
exc = sys.exc_info()
exc_type = exc[0]
value = exc[1]
print "== EXCEPTION ==\n%s: %s\n\n%s===============" % (
exc_type,
value,
"\n".join( traceback.format_tb( exc[2] ) ) )
print "Step status: ", self.stepStatus
print "Workflow status: ", self.parent.workflowStatus
if self.stepStatus['OK']:
# This is the error that caused the workflow disruption
# report it to the WMS
error_message = 'Exception while %s module execution: %s' % ( mod_inst_name, str( x ) )
if self.workflow_commons.has_key( 'JobReport' ):
if self.parent.workflowStatus['OK']:
result = self.workflow_commons['JobReport'].setApplicationStatus( 'Exception in %s module' % mod_inst_name )
self.stepStatus = S_ERROR( error_message )
# now we need to copy output values to the STEP!!! parameters
for st_parameter in self.parameters:
if st_parameter.isOutput():
# print '<< Output', st_parameter
if st_parameter.isLinked():
# print "StepInstance self." + st_parameter.getName(), '=', st_parameter.getLinkedModule() + '.' + st_parameter.getLinkedParameter()
if st_parameter.getLinkedModule() == 'self':
# this is not supposed to happen
print "Warning! Step OUTPUT attribute", st_parameter.getName(),
print "refer to the attribute of the same step", st_parameter.getLinkedParameter(), step_exec_attr[st_parameter.getLinkedParameter()]
step_exec_attr[st_parameter.getName()] = step_exec_attr[st_parameter.getLinkedParameter()]
else:
# print 'Output step_exec_attr', st_parameter.getName(), step_exec_modules[st_parameter.getLinkedModule()], st_parameter.getLinkedParameter()
step_exec_attr[st_parameter.getName()] = \
getattr( step_exec_modules[st_parameter.getLinkedModule()],
st_parameter.getLinkedParameter() )
setattr( self, st_parameter.getName(), step_exec_attr[st_parameter.getName()] )
else:
# This also does not make sense - we can give a warning
print "Warning! Step OUTPUT attribute ", st_parameter.getName(),
print "assigned constant", st_parameter.getValue()
# print "StepInstance self." + st_parameter.getName(), '=', st_parameter.getValue()
step_exec_attr[st_parameter.getName()] = st_parameter.getValue()
print 'Step Output', st_parameter.getName(), '=', step_exec_attr[st_parameter.getName()]
# Return the result of the first failed module or S_OK
if not self.stepStatus['OK']:
return S_ERROR( error_message )
else:
return S_OK( result['Value'] )
| gpl-3.0 |
Pikecillo/genna | external/PyXML-0.8.4/test/test_domreg.py | 1 | 2996 | """Test DOM registration framework."""
import unittest
import test_support
from xml.dom import domreg
def parse_feature_string(s):
# helper to make sure the results are always plain lists
return list(domreg._parse_feature_string(s))
class DomregTestCase(unittest.TestCase):
def setUp(self):
domreg.registerDOMImplementation("its-a-fake",
self.getDOMImplementation)
def getDOMImplementation(self):
self.fake = FakeDOM(self.my_features)
return self.fake
def test_simple(self):
self.assertEqual(parse_feature_string("simple"),
[("simple", None)])
self.assertEqual(parse_feature_string("simple 1.0"),
[("simple", "1.0")])
self.assertEqual(parse_feature_string("simple complex"),
[("simple", None), ("complex", None)])
self.assertEqual(parse_feature_string("simple 2 complex 3.1.4.2"),
[("simple", "2"), ("complex", "3.1.4.2")])
def test_extra_version(self):
self.assertRaises(ValueError,
domreg._parse_feature_string, "1.0")
self.assertRaises(ValueError,
domreg._parse_feature_string, "1 simple")
self.assertRaises(ValueError,
domreg._parse_feature_string, "simple 1 2")
def test_find_myself(self):
self.my_features = [("splat", "1"), ("splat", "2"), ("splat", None)]
self.failUnless(domreg.getDOMImplementation(features="splat")
is self.fake)
self.failUnless(domreg.getDOMImplementation(features="splat 1")
is self.fake)
self.failUnless(domreg.getDOMImplementation(features="splat 2")
is self.fake)
self.failUnless(domreg.getDOMImplementation(features="splat 1 splat 2")
is self.fake)
self.failUnless(domreg.getDOMImplementation(features="splat 2 splat 1")
is self.fake)
def _test_cant_find(self):
# This test is disabled since we need to determine what the
# right thing to do is. ;-( The DOM Level 3 draft says
# getDOMImplementation() should return null when there isn't a
# match, but the existing Python API raises ImportError.
self.my_features = []
self.failUnless(domreg.getDOMImplementation(features="splat")
is None)
self.failUnless(domreg.getDOMImplementation(features="splat 1")
is None)
class FakeDOM:
def __init__(self, features):
self.__features = features
def hasFeature(self, feature, version):
return (feature, version) in self.__features
def test_suite():
return unittest.makeSuite(DomregTestCase)
def test_main():
test_support.run_suite(test_suite())
if __name__ == "__main__":
test_support.verbose = 1
test_main()
| gpl-2.0 |
nikoonia/gem5v | src/mem/slicc/ast/TypeFieldAST.py | 92 | 1754 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.AST import AST
class TypeFieldAST(AST):
def __init__(self, slicc, pairs):
super(TypeFieldAST, self).__init__(slicc, pairs)
| bsd-3-clause |
raschlemper/teratec | node_modules/karma/node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 1788 | 1435 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
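# Worked example of the surrogate formulae above (editor's addition, not in
# the original script): for the supplementary code point U+1F4A9,
#   highSurrogate = (0x1F4A9 - 0x10000) / 0x400 + 0xD800 = 0xD83D
#   lowSurrogate  = (0x1F4A9 - 0x10000) % 0x400 + 0xDC00 = 0xDCA9
# so unisymbol(0x1F4A9) returns the UTF-16 pair u'\ud83d\udca9'.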
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Uppercase the hexadecimal digits in \u escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
| mit |
zennobjects/kivy | kivy/adapters/listadapter.py | 7 | 17276 | '''
ListAdapter
=================
.. versionadded:: 1.5
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
A :class:`ListAdapter` is an adapter around a python list.
Selection operations are a main concern for the class.
From an :class:`Adapter`, a :class:`ListAdapter` gets cls, template, and
args_converter properties and adds others that control selection behaviour:
* *selection*, a list of selected items.
* *selection_mode*, 'single', 'multiple', 'none'
* *allow_empty_selection*, a boolean -- If False, a selection is forced. If
True, the selection changes only through user or programmatic action and
may be empty.
If you wish to have a bare-bones list adapter, without selection, use a
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter`.
A :class:`~kivy.adapters.dictadapter.DictAdapter` is a subclass of a
:class:`~kivy.adapters.listadapter.ListAdapter`. They both dispatch the
*on_selection_change* event.
:Events:
`on_selection_change`: (view, view list)
Fired when selection changes
.. versionchanged:: 1.6.0
Added data = ListProperty([]), which was probably inadvertently deleted at
some point. This means that whenever data changes an update will fire,
instead of having to reset the data object (Adapter has data defined as
an ObjectProperty, so we need to reset it here to ListProperty). See also
DictAdapter and its set of data = DictProperty().
'''
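# A minimal usage sketch (editor's addition, not part of the kivy source).
# The argument names follow the public API described above; ListItemButton
# (from kivy.uix.listview) is just one plausible choice of item view class.
#
#     from kivy.adapters.listadapter import ListAdapter
#     from kivy.uix.listview import ListItemButton
#
#     adapter = ListAdapter(
#         data=['apple', 'banana', 'cherry'],
#         args_converter=lambda index, item: {'text': item,
#                                             'size_hint_y': None,
#                                             'height': 25},
#         selection_mode='single',
#         allow_empty_selection=False,
#         cls=ListItemButton)
#
#     # Because allow_empty_selection is False, the first item view is
#     # auto-selected, so adapter.selection is non-empty after construction.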
__all__ = ('ListAdapter', )
import inspect
from kivy.event import EventDispatcher
from kivy.adapters.adapter import Adapter
from kivy.adapters.models import SelectableDataItem
from kivy.properties import ListProperty
from kivy.properties import DictProperty
from kivy.properties import BooleanProperty
from kivy.properties import OptionProperty
from kivy.properties import NumericProperty
from kivy.lang import Builder
class ListAdapter(Adapter, EventDispatcher):
'''
A base class for adapters interfacing with lists, dictionaries or other
collection-type data, adding selection, view creation and management
functionality.
'''
data = ListProperty([])
'''The data list property is redefined here, overriding its definition as
an ObjectProperty in the Adapter class. We bind to data so that any
changes will trigger updates. See also how the
:class:`~kivy.adapters.DictAdapter` redefines data as a
:class:`~kivy.properties.DictProperty`.
:data:`data` is a :class:`~kivy.properties.ListProperty` and defaults
to [].
'''
selection = ListProperty([])
'''The selection list property is the container for selected items.
:data:`selection` is a :class:`~kivy.properties.ListProperty` and defaults
to [].
'''
selection_mode = OptionProperty('single',
options=('none', 'single', 'multiple'))
'''Selection modes:
* *none*, use the list as a simple list (no select action). This option
is here so that selection can be turned off, momentarily or
permanently, for an existing list adapter.
A :class:`~kivy.adapters.listadapter.ListAdapter` is not meant to be
used as a primary no-selection list adapter. Use a
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` for that.
* *single*, multi-touch/click ignored. Single item selection only.
* *multiple*, multi-touch / incremental addition to selection allowed;
may be limited to a count by selection_limit
:data:`selection_mode` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'single'.
'''
propagate_selection_to_data = BooleanProperty(False)
'''Normally, data items are not selected/deselected because the data items
might not have an is_selected boolean property -- only the item view for a
given data item is selected/deselected as part of the maintained selection
list. However, if the data items do have an is_selected property, or if
they mix in :class:`~kivy.adapters.models.SelectableDataItem`, the
selection machinery can propagate selection to data items. This can be
useful for storing selection state in a local database or backend database
for maintaining state in game play or other similar scenarios. It is a
convenience function.
To propagate selection or not?
Consider a shopping list application for shopping for fruits at the
market. The app allows for the selection of fruits to buy for each day of
the week, presenting seven lists, one per day. Each list is
loaded with all the available fruits, but the selection for each is a
subset. There is only one set of fruit data shared between the lists, so
it would not make sense to propagate selection to the data because
selection in any of the seven lists would clash and mix with that of the
others.
However, consider a game that uses the same fruits data for selecting
fruits available for fruit-tossing. A given round of play could have a
full fruits list, with fruits available for tossing shown selected. If the
game is saved and rerun, the full fruits list, with selection marked on
each item, would be reloaded correctly if selection is always propagated to
the data. You could accomplish the same functionality by writing code to
operate on list selection, but having selection stored in the data
ListProperty might prove convenient in some cases.
:data:`propagate_selection_to_data` is a
:class:`~kivy.properties.BooleanProperty` and defaults to False.
'''
allow_empty_selection = BooleanProperty(True)
'''The allow_empty_selection property may be used for cascading selection between
several list views, or between a list view and an observing view. Such
automatic maintenance of the selection is important for all but simple
list displays. Set allow_empty_selection to False and the selection is
auto-initialized and always maintained, so any observing views
may likewise be updated to stay in sync.
:data:`allow_empty_selection` is a
:class:`~kivy.properties.BooleanProperty` and defaults to True.
'''
selection_limit = NumericProperty(-1)
'''When the selection_mode is multiple and the selection_limit is
non-negative, this number will limit the number of selected items. It can
be set to 1, which is equivalent to single selection. If selection_limit is
not set, the default value is -1, meaning that no limit will be enforced.
:data:`selection_limit` is a :class:`~kivy.properties.NumericProperty` and
defaults to -1 (no limit).
'''
cached_views = DictProperty({})
'''View instances for data items are instantiated and managed by the
adapter. Here we maintain a dictionary containing the view
instances keyed to the indices in the data.
This dictionary works as a cache. get_view() only asks for a view from
the adapter if one is not already stored for the requested index.
:data:`cached_views` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
__events__ = ('on_selection_change', )
def __init__(self, **kwargs):
super(ListAdapter, self).__init__(**kwargs)
self.bind(selection_mode=self.selection_mode_changed,
allow_empty_selection=self.check_for_empty_selection,
data=self.update_for_new_data)
self.update_for_new_data()
def delete_cache(self, *args):
self.cached_views = {}
def get_count(self):
return len(self.data)
def get_data_item(self, index):
if index < 0 or index >= len(self.data):
return None
return self.data[index]
def selection_mode_changed(self, *args):
if self.selection_mode == 'none':
for selected_view in self.selection:
self.deselect_item_view(selected_view)
else:
self.check_for_empty_selection()
def get_view(self, index):
if index in self.cached_views:
return self.cached_views[index]
item_view = self.create_view(index)
if item_view:
self.cached_views[index] = item_view
return item_view
def create_view(self, index):
'''This method is more complicated than the one in
:class:`kivy.adapters.adapter.Adapter` and
:class:`kivy.adapters.simplelistadapter.SimpleListAdapter`, because
here we create bindings for the data item and its children back to
self.handle_selection(), and do other selection-related tasks to keep
item views in sync with the data.
'''
item = self.get_data_item(index)
if item is None:
return None
item_args = self.args_converter(index, item)
item_args['index'] = index
if self.cls:
view_instance = self.cls(**item_args)
else:
view_instance = Builder.template(self.template, **item_args)
if self.propagate_selection_to_data:
# The data item must be a subclass of SelectableDataItem, or must
# have an is_selected boolean or function, so it has is_selected
# available. If is_selected is unavailable on the data item, an
# exception is raised.
#
if isinstance(item, SelectableDataItem):
if item.is_selected:
self.handle_selection(view_instance)
elif type(item) == dict and 'is_selected' in item:
if item['is_selected']:
self.handle_selection(view_instance)
elif hasattr(item, 'is_selected'):
if (inspect.isfunction(item.is_selected)
or inspect.ismethod(item.is_selected)):
if item.is_selected():
self.handle_selection(view_instance)
else:
if item.is_selected:
self.handle_selection(view_instance)
else:
msg = "ListAdapter: unselectable data item for {0}"
raise Exception(msg.format(index))
view_instance.bind(on_release=self.handle_selection)
for child in view_instance.children:
child.bind(on_release=self.handle_selection)
return view_instance
def on_selection_change(self, *args):
'''on_selection_change() is the default handler for the
on_selection_change event.
'''
pass
def handle_selection(self, view, hold_dispatch=False, *args):
if view not in self.selection:
if self.selection_mode in ['none', 'single'] and \
len(self.selection) > 0:
for selected_view in self.selection:
self.deselect_item_view(selected_view)
if self.selection_mode != 'none':
if self.selection_mode == 'multiple':
if self.allow_empty_selection:
# If < 0, selection_limit is not active.
if self.selection_limit < 0:
self.select_item_view(view)
else:
if len(self.selection) < self.selection_limit:
self.select_item_view(view)
else:
self.select_item_view(view)
else:
self.select_item_view(view)
else:
self.deselect_item_view(view)
if self.selection_mode != 'none':
# If the deselection makes selection empty, the following call
# will check allow_empty_selection, and if False, will
# select the first item. If view happens to be the first item,
# this will be a reselection, and the user will notice no
# change, except perhaps a flicker.
#
self.check_for_empty_selection()
if not hold_dispatch:
self.dispatch('on_selection_change')
def select_data_item(self, item):
self.set_data_item_selection(item, True)
def deselect_data_item(self, item):
self.set_data_item_selection(item, False)
def set_data_item_selection(self, item, value):
if isinstance(item, SelectableDataItem):
item.is_selected = value
elif type(item) == dict:
item['is_selected'] = value
elif hasattr(item, 'is_selected'):
if (inspect.isfunction(item.is_selected)
or inspect.ismethod(item.is_selected)):
item.is_selected()
else:
item.is_selected = value
def select_item_view(self, view):
view.select()
view.is_selected = True
self.selection.append(view)
# [TODO] sibling selection for composite items
# Needed? Or handled from parent?
# (avoid circular, redundant selection)
#if hasattr(view, 'parent') and hasattr(view.parent, 'children'):
#siblings = [child for child in view.parent.children if child != view]
#for sibling in siblings:
#if hasattr(sibling, 'select'):
#sibling.select()
if self.propagate_selection_to_data:
data_item = self.get_data_item(view.index)
self.select_data_item(data_item)
def select_list(self, view_list, extend=True):
'''The select call is made for the items in the provided view_list.
Arguments:
view_list: the list of item views to become the new selection, or
to add to the existing selection
extend: boolean for whether or not to extend the existing list
'''
if not extend:
self.selection = []
for view in view_list:
self.handle_selection(view, hold_dispatch=True)
self.dispatch('on_selection_change')
def deselect_item_view(self, view):
view.deselect()
view.is_selected = False
self.selection.remove(view)
# [TODO] sibling deselection for composite items
# Needed? Or handled from parent?
# (avoid circular, redundant selection)
#if hasattr(view, 'parent') and hasattr(view.parent, 'children'):
#siblings = [child for child in view.parent.children if child != view]
#for sibling in siblings:
#if hasattr(sibling, 'deselect'):
#sibling.deselect()
if self.propagate_selection_to_data:
item = self.get_data_item(view.index)
self.deselect_data_item(item)
def deselect_list(self, l):
for view in l:
self.handle_selection(view, hold_dispatch=True)
self.dispatch('on_selection_change')
# [TODO] Could easily add select_all() and deselect_all().
def update_for_new_data(self, *args):
self.delete_cache()
self.initialize_selection()
def initialize_selection(self, *args):
if len(self.selection) > 0:
self.selection = []
self.dispatch('on_selection_change')
self.check_for_empty_selection()
def check_for_empty_selection(self, *args):
if not self.allow_empty_selection:
if len(self.selection) == 0:
# Select the first item if we have it.
v = self.get_view(0)
if v is not None:
self.handle_selection(v)
# [TODO] Also make methods for scroll_to_sel_start, scroll_to_sel_end,
# scroll_to_sel_middle.
def trim_left_of_sel(self, *args):
'''Cut list items with indices less than the index of the first
selected item, if there is a selection.
'''
if len(self.selection) > 0:
first_sel_index = min([sel.index for sel in self.selection])
self.data = self.data[first_sel_index:]
def trim_right_of_sel(self, *args):
'''Cut list items with indices greater than the index of the last
selected item, if there is a selection.
'''
if len(self.selection) > 0:
last_sel_index = max([sel.index for sel in self.selection])
print('last_sel_index', last_sel_index)
self.data = self.data[:last_sel_index + 1]
def trim_to_sel(self, *args):
'''Cut list items with indices less than the index of the first selected
item or greater than the index of the last selected item, if there is a
selection. This preserves intervening list items within the selected
range.
'''
if len(self.selection) > 0:
sel_indices = [sel.index for sel in self.selection]
first_sel_index = min(sel_indices)
last_sel_index = max(sel_indices)
self.data = self.data[first_sel_index:last_sel_index + 1]
def cut_to_sel(self, *args):
'''Same as trim_to_sel, but intervening list items within the selected
range are also cut, leaving only list items that are selected.
'''
if len(self.selection) > 0:
self.data = self.selection
| mit |
fanghuaqi/mbed | tools/host_tests/stdio_auto.py | 122 | 2105 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import random
from time import time
class StdioTest():
PATTERN_INT_VALUE = "Your value was: (-?\d+)"
re_detect_int_value = re.compile(PATTERN_INT_VALUE)
def test(self, selftest):
test_result = True
c = selftest.mbed.serial_readline() # {{start}} preamble
if c is None:
return selftest.RESULT_IO_SERIAL
selftest.notify(c)
for i in range(0, 10):
random_integer = random.randint(-99999, 99999)
selftest.notify("HOST: Generated number: " + str(random_integer))
start = time()
selftest.mbed.serial_write(str(random_integer) + "\n")
serial_stdio_msg = selftest.mbed.serial_readline()
if serial_stdio_msg is None:
return selftest.RESULT_IO_SERIAL
delay_time = time() - start
selftest.notify(serial_stdio_msg.strip())
# Searching for reply with scanned values
m = self.re_detect_int_value.search(serial_stdio_msg)
if m and len(m.groups()):
int_value = m.groups()[0]
int_value_cmp = random_integer == int(int_value)
test_result = test_result and int_value_cmp
selftest.notify("HOST: Number %s read after %.3f sec ... [%s]"% (int_value, delay_time, "OK" if int_value_cmp else "FAIL"))
else:
test_result = False
break
return selftest.RESULT_SUCCESS if test_result else selftest.RESULT_FAILURE
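# Expected exchange per iteration (editor's illustration; the exact wording
# of the device-side reply is assumed from the regex above):
#   host  -> device:  "-12345\n"
#   device -> host:   "Your value was: -12345"
# The test passes only if every echoed integer matches the one sent.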
| apache-2.0 |
SteveXiSong/UW-Madison-ECE757-S15-MulticastSnooping | configs/common/cpu2000.py | 48 | 22462 | # Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os
import sys
from os.path import basename, exists, join as joinpath, normpath
from os.path import isdir, isfile, islink
spec_dist = os.environ.get('M5_CPU2000', '/dist/m5/cpu2000')
def copyfiles(srcdir, dstdir):
from filecmp import cmp as filecmp
from shutil import copyfile
srcdir = normpath(srcdir)
dstdir = normpath(dstdir)
if not isdir(dstdir):
os.mkdir(dstdir)
for root, dirs, files in os.walk(srcdir):
root = normpath(root)
prefix = os.path.commonprefix([root, srcdir])
root = root[len(prefix):]
if root.startswith('/'):
root = root[1:]
for entry in dirs:
newdir = joinpath(dstdir, root, entry)
if not isdir(newdir):
os.mkdir(newdir)
for entry in files:
dest = normpath(joinpath(dstdir, root, entry))
src = normpath(joinpath(srcdir, root, entry))
if not isfile(dest) or not filecmp(src, dest):
copyfile(src, dest)
# some of the spec benchmarks expect to be run from one directory up.
# just create some symlinks that solve the problem
inlink = joinpath(dstdir, 'input')
outlink = joinpath(dstdir, 'output')
if not exists(inlink):
os.symlink('.', inlink)
if not exists(outlink):
os.symlink('.', outlink)
class Benchmark(object):
def __init__(self, isa, os, input_set):
if not hasattr(self.__class__, 'name'):
self.name = self.__class__.__name__
if not hasattr(self.__class__, 'binary'):
self.binary = self.name
if not hasattr(self.__class__, 'args'):
self.args = []
if not hasattr(self.__class__, 'output'):
self.output = '%s.out' % self.name
if not hasattr(self.__class__, 'simpoint'):
self.simpoint = None
try:
func = getattr(self.__class__, input_set)
except AttributeError:
raise AttributeError, \
'The benchmark %s does not have the %s input set' % \
(self.name, input_set)
executable = joinpath(spec_dist, 'binaries', isa, os, self.binary)
if not isfile(executable):
raise AttributeError, '%s not found' % executable
self.executable = executable
# root of tree for input & output data files
data_dir = joinpath(spec_dist, 'data', self.name)
# optional subtree with files shared across input sets
all_dir = joinpath(data_dir, 'all')
# dirs for input & output files for this input set
inputs_dir = joinpath(data_dir, input_set, 'input')
outputs_dir = joinpath(data_dir, input_set, 'output')
# keep around which input set was specified
self.input_set = input_set
if not isdir(inputs_dir):
raise AttributeError, '%s not found' % inputs_dir
self.inputs_dir = [ inputs_dir ]
if isdir(all_dir):
self.inputs_dir += [ joinpath(all_dir, 'input') ]
if isdir(outputs_dir):
self.outputs_dir = outputs_dir
if not hasattr(self.__class__, 'stdin'):
self.stdin = joinpath(inputs_dir, '%s.in' % self.name)
if not isfile(self.stdin):
self.stdin = None
if not hasattr(self.__class__, 'stdout'):
self.stdout = joinpath(outputs_dir, '%s.out' % self.name)
if not isfile(self.stdout):
self.stdout = None
func(self, isa, os)
def makeLiveProcessArgs(self, **kwargs):
# set up default args for LiveProcess object
process_args = {}
process_args['cmd'] = [ self.name ] + self.args
process_args['executable'] = self.executable
if self.stdin:
process_args['input'] = self.stdin
if self.stdout:
process_args['output'] = self.stdout
if self.simpoint:
process_args['simpoint'] = self.simpoint
# explicit keywords override defaults
process_args.update(kwargs)
return process_args
def makeLiveProcess(self, **kwargs):
process_args = self.makeLiveProcessArgs(**kwargs)
# figure out working directory: use m5's outdir unless
# overridden by LiveProcess's cwd param
cwd = process_args.get('cwd')
if not cwd:
from m5 import options
cwd = options.outdir
process_args['cwd'] = cwd
if not isdir(cwd):
os.makedirs(cwd)
# copy input files to working directory
for d in self.inputs_dir:
copyfiles(d, cwd)
# generate LiveProcess object
from m5.objects import LiveProcess
return LiveProcess(**process_args)
def __str__(self):
return self.name
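# Typical call pattern (editor's sketch, mirroring the __main__ block at the
# bottom of this file; the benchmark class and the 'alpha'/'tru64' target are
# only illustrative, and attaching the process to a CPU is a hypothetical
# step that belongs in an m5 config script, not in this module):
#
#     bench = bzip2_source('alpha', 'tru64', 'ref')
#     process = bench.makeLiveProcess()   # copies input files, builds LiveProcess
#     # system.cpu.workload = process     # hypothetical use in a config script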
class DefaultBenchmark(Benchmark):
def ref(self, isa, os): pass
def test(self, isa, os): pass
def train(self, isa, os): pass
class MinneDefaultBenchmark(DefaultBenchmark):
def smred(self, isa, os): pass
def mdred(self, isa, os): pass
def lgred(self, isa, os): pass
class ammp(MinneDefaultBenchmark):
name = 'ammp'
number = 188
lang = 'C'
simpoint = 108*100E6
class applu(MinneDefaultBenchmark):
name = 'applu'
number = 173
lang = 'F77'
simpoint = 2179*100E6
class apsi(MinneDefaultBenchmark):
name = 'apsi'
number = 301
lang = 'F77'
simpoint = 3408*100E6
class art(DefaultBenchmark):
name = 'art'
number = 179
lang = 'C'
def test(self, isa, os):
self.args = [ '-scanfile', 'c756hel.in',
'-trainfile1', 'a10.img',
'-stride', '2',
'-startx', '134',
'-starty', '220',
'-endx', '139',
'-endy', '225',
'-objects', '1' ]
self.output = 'test.out'
def train(self, isa, os):
self.args = [ '-scanfile', 'c756hel.in',
'-trainfile1', 'a10.img',
'-stride', '2',
'-startx', '134',
'-starty', '220',
'-endx', '184',
'-endy', '240',
'-objects', '3' ]
self.output = 'train.out'
def lgred(self, isa, os):
self.args = ['-scanfile', 'c756hel.in',
'-trainfile1', 'a10.img',
'-stride', '5',
'-startx', '134',
'-starty', '220',
'-endx', '184',
'-endy', '240',
'-objects', '1' ]
self.output = 'lgred.out'
class art110(art):
def ref(self, isa, os):
self.args = [ '-scanfile', 'c756hel.in',
'-trainfile1', 'a10.img',
'-trainfile2', 'hc.img',
'-stride', '2',
'-startx', '110',
'-starty', '200',
'-endx', '160',
'-endy', '240',
'-objects', '10' ]
self.output = 'ref.1.out'
self.simpoint = 340*100E6
class art470(art):
def ref(self, isa, os):
self.args = [ '-scanfile', 'c756hel.in',
'-trainfile1', 'a10.img',
'-trainfile2', 'hc.img',
'-stride', '2',
'-startx', '470',
'-starty', '140',
'-endx', '520',
'-endy', '180',
'-objects', '10' ]
self.output = 'ref.2.out'
self.simpoint = 365*100E6
class equake(DefaultBenchmark):
name = 'equake'
number = 183
lang = 'C'
simpoint = 812*100E6
def lgred(self, isa, os): pass
class facerec(MinneDefaultBenchmark):
name = 'facerec'
number = 187
lang = 'F'
simpoint = 375*100E6
class fma3d(MinneDefaultBenchmark):
name = 'fma3d'
number = 191
lang = 'F'
simpoint = 2541*100E6
class galgel(MinneDefaultBenchmark):
name = 'galgel'
number = 178
lang = 'F'
simpoint = 2491*100E6
class lucas(MinneDefaultBenchmark):
name = 'lucas'
number = 189
lang = 'F'
simpoint = 545*100E6
class mesa(Benchmark):
name = 'mesa'
number = 177
lang = 'C'
stdin = None
def __set_args(self, frames):
self.args = [ '-frames', frames, '-meshfile', '%s.in' % self.name,
'-ppmfile', '%s.ppm' % self.name ]
def test(self, isa, os):
self.__set_args('10')
def train(self, isa, os):
self.__set_args('500')
def ref(self, isa, os):
self.__set_args('1000')
self.simpoint = 1135*100E6
def lgred(self, isa, os):
self.__set_args('1')
class mgrid(MinneDefaultBenchmark):
name = 'mgrid'
number = 172
lang = 'F77'
simpoint = 3292*100E6
class sixtrack(DefaultBenchmark):
name = 'sixtrack'
number = 200
lang = 'F77'
simpoint = 3043*100E6
def lgred(self, isa, os): pass
class swim(MinneDefaultBenchmark):
name = 'swim'
number = 171
lang = 'F77'
simpoint = 2079*100E6
class wupwise(DefaultBenchmark):
name = 'wupwise'
number = 168
lang = 'F77'
simpoint = 3237*100E6
def lgred(self, isa, os): pass
class bzip2(DefaultBenchmark):
name = 'bzip2'
number = 256
lang = 'C'
def test(self, isa, os):
self.args = [ 'input.random' ]
def train(self, isa, os):
self.args = [ 'input.compressed' ]
class bzip2_source(bzip2):
def ref(self, isa, os):
self.simpoint = 977*100E6
self.args = [ 'input.source', '58' ]
def lgred(self, isa, os):
self.args = [ 'input.source', '1' ]
class bzip2_graphic(bzip2):
def ref(self, isa, os):
self.simpoint = 718*100E6
self.args = [ 'input.graphic', '58' ]
def lgred(self, isa, os):
self.args = [ 'input.graphic', '1' ]
class bzip2_program(bzip2):
def ref(self, isa, os):
self.simpoint = 458*100E6
self.args = [ 'input.program', '58' ]
def lgred(self, isa, os):
self.args = [ 'input.program', '1' ]
class crafty(MinneDefaultBenchmark):
name = 'crafty'
number = 186
lang = 'C'
simpoint = 774*100E6
class eon(MinneDefaultBenchmark):
name = 'eon'
number = 252
lang = 'CXX'
stdin = None
class eon_kajiya(eon):
args = [ 'chair.control.kajiya', 'chair.camera', 'chair.surfaces',
'chair.kajiya.ppm', 'ppm', 'pixels_out.kajiya']
output = 'kajiya_log.out'
class eon_cook(eon):
args = [ 'chair.control.cook', 'chair.camera', 'chair.surfaces',
'chair.cook.ppm', 'ppm', 'pixels_out.cook' ]
output = 'cook_log.out'
class eon_rushmeier(eon):
args = [ 'chair.control.rushmeier', 'chair.camera', 'chair.surfaces',
'chair.rushmeier.ppm', 'ppm', 'pixels_out.rushmeier' ]
output = 'rushmeier_log.out'
simpoint = 403*100E6
class gap(DefaultBenchmark):
name = 'gap'
number = 254
lang = 'C'
def __set_args(self, size):
self.args = [ '-l', './', '-q', '-m', size ]
def test(self, isa, os):
self.__set_args('64M')
def train(self, isa, os):
self.__set_args('128M')
def ref(self, isa, os):
self.__set_args('192M')
self.simpoint = 674*100E6
def lgred(self, isa, os):
self.__set_args('64M')
def mdred(self, isa, os):
self.__set_args('64M')
def smred(self, isa, os):
self.__set_args('64M')
class gcc(DefaultBenchmark):
name = 'gcc'
number = 176
lang = 'C'
def test(self, isa, os):
self.args = [ 'cccp.i', '-o', 'cccp.s' ]
def train(self, isa, os):
self.args = [ 'cp-decl.i', '-o', 'cp-decl.s' ]
def smred(self, isa, os):
self.args = [ 'c-iterate.i', '-o', 'c-iterate.s' ]
def mdred(self, isa, os):
self.args = [ 'rdlanal.i', '-o', 'rdlanal.s' ]
def lgred(self, isa, os):
self.args = [ 'cp-decl.i', '-o', 'cp-decl.s' ]
class gcc_166(gcc):
def ref(self, isa, os):
self.simpoint = 389*100E6
self.args = [ '166.i', '-o', '166.s' ]
class gcc_200(gcc):
def ref(self, isa, os):
self.simpoint = 736*100E6
self.args = [ '200.i', '-o', '200.s' ]
class gcc_expr(gcc):
def ref(self, isa, os):
self.simpoint = 36*100E6
self.args = [ 'expr.i', '-o', 'expr.s' ]
class gcc_integrate(gcc):
def ref(self, isa, os):
self.simpoint = 4*100E6
self.args = [ 'integrate.i', '-o', 'integrate.s' ]
class gcc_scilab(gcc):
def ref(self, isa, os):
self.simpoint = 207*100E6
self.args = [ 'scilab.i', '-o', 'scilab.s' ]
class gzip(DefaultBenchmark):
name = 'gzip'
number = 164
lang = 'C'
def test(self, isa, os):
self.args = [ 'input.compressed', '2' ]
def train(self, isa, os):
self.args = [ 'input.combined', '32' ]
class gzip_source(gzip):
def ref(self, isa, os):
self.simpoint = 334*100E6
self.args = [ 'input.source', '1' ]
def smred(self, isa, os):
self.args = [ 'input.source', '1' ]
def mdred(self, isa, os):
self.args = [ 'input.source', '1' ]
def lgred(self, isa, os):
self.args = [ 'input.source', '1' ]
class gzip_log(gzip):
def ref(self, isa, os):
self.simpoint = 265*100E6
self.args = [ 'input.log', '60' ]
def smred(self, isa, os):
self.args = [ 'input.log', '1' ]
def mdred(self, isa, os):
self.args = [ 'input.log', '1' ]
def lgred(self, isa, os):
self.args = [ 'input.log', '1' ]
class gzip_graphic(gzip):
def ref(self, isa, os):
self.simpoint = 653*100E6
self.args = [ 'input.graphic', '60' ]
def smred(self, isa, os):
self.args = [ 'input.graphic', '1' ]
def mdred(self, isa, os):
self.args = [ 'input.graphic', '1' ]
def lgred(self, isa, os):
self.args = [ 'input.graphic', '1' ]
class gzip_random(gzip):
def ref(self, isa, os):
self.simpoint = 623*100E6
self.args = [ 'input.random', '60' ]
def smred(self, isa, os):
self.args = [ 'input.random', '1' ]
def mdred(self, isa, os):
self.args = [ 'input.random', '1' ]
def lgred(self, isa, os):
self.args = [ 'input.random', '1' ]
class gzip_program(gzip):
def ref(self, isa, os):
self.simpoint = 1189*100E6
self.args = [ 'input.program', '60' ]
def smred(self, isa, os):
self.args = [ 'input.program', '1' ]
def mdred(self, isa, os):
self.args = [ 'input.program', '1' ]
def lgred(self, isa, os):
self.args = [ 'input.program', '1' ]
class mcf(MinneDefaultBenchmark):
name = 'mcf'
number = 181
lang = 'C'
args = [ 'mcf.in' ]
simpoint = 553*100E6
class parser(MinneDefaultBenchmark):
name = 'parser'
number = 197
lang = 'C'
args = [ '2.1.dict', '-batch' ]
simpoint = 1146*100E6
class perlbmk(DefaultBenchmark):
name = 'perlbmk'
number = 253
lang = 'C'
def test(self, isa, os):
self.args = [ '-I.', '-I', 'lib', 'test.pl' ]
self.stdin = 'test.in'
class perlbmk_diffmail(perlbmk):
def ref(self, isa, os):
self.simpoint = 141*100E6
self.args = [ '-I', 'lib', 'diffmail.pl', '2', '550', '15', '24',
'23', '100' ]
def train(self, isa, os):
self.args = [ '-I', 'lib', 'diffmail.pl', '2', '350', '15', '24',
'23', '150' ]
class perlbmk_scrabbl(perlbmk):
def train(self, isa, os):
self.args = [ '-I.', '-I', 'lib', 'scrabbl.pl' ]
self.stdin = 'scrabbl.in'
class perlbmk_makerand(perlbmk):
def ref(self, isa, os):
self.simpoint = 11*100E6
self.args = [ '-I', 'lib', 'makerand.pl' ]
def lgred(self, isa, os):
self.args = [ '-I.', '-I', 'lib', 'lgred.makerand.pl' ]
def mdred(self, isa, os):
self.args = [ '-I.', '-I', 'lib', 'mdred.makerand.pl' ]
def smred(self, isa, os):
self.args = [ '-I.', '-I', 'lib', 'smred.makerand.pl' ]
class perlbmk_perfect(perlbmk):
def ref(self, isa, os):
self.simpoint = 5*100E6
self.args = [ '-I', 'lib', 'perfect.pl', 'b', '3', 'm', '4' ]
def train(self, isa, os):
self.args = [ '-I', 'lib', 'perfect.pl', 'b', '3' ]
class perlbmk_splitmail1(perlbmk):
def ref(self, isa, os):
self.simpoint = 405*100E6
self.args = [ '-I', 'lib', 'splitmail.pl', '850', '5', '19',
'18', '1500' ]
class perlbmk_splitmail2(perlbmk):
def ref(self, isa, os):
self.args = [ '-I', 'lib', 'splitmail.pl', '704', '12', '26',
'16', '836' ]
class perlbmk_splitmail3(perlbmk):
def ref(self, isa, os):
self.args = [ '-I', 'lib', 'splitmail.pl', '535', '13', '25',
'24', '1091' ]
class perlbmk_splitmail4(perlbmk):
def ref(self, isa, os):
self.args = [ '-I', 'lib', 'splitmail.pl', '957', '12', '23',
'26', '1014' ]
class twolf(Benchmark):
name = 'twolf'
number = 300
lang = 'C'
stdin = None
def test(self, isa, os):
self.args = [ 'test' ]
def train(self, isa, os):
self.args = [ 'train' ]
def ref(self, isa, os):
self.simpoint = 1066*100E6
self.args = [ 'ref' ]
def smred(self, isa, os):
self.args = [ 'smred' ]
def mdred(self, isa, os):
self.args = [ 'mdred' ]
def lgred(self, isa, os):
self.args = [ 'lgred' ]
class vortex(Benchmark):
name = 'vortex'
number = 255
lang = 'C'
stdin = None
def __init__(self, isa, os, input_set):
if (isa in ('alpha', 'arm', 'thumb', 'aarch64')):
self.endian = 'lendian'
elif (isa == 'sparc' or isa == 'sparc32'):
self.endian = 'bendian'
else:
raise AttributeError, "unknown ISA %s" % isa
super(vortex, self).__init__(isa, os, input_set)
def test(self, isa, os):
self.args = [ '%s.raw' % self.endian ]
self.output = 'vortex.out'
def train(self, isa, os):
self.args = [ '%s.raw' % self.endian ]
self.output = 'vortex.out'
def smred(self, isa, os):
self.args = [ '%s.raw' % self.endian ]
self.output = 'vortex.out'
def mdred(self, isa, os):
self.args = [ '%s.raw' % self.endian ]
self.output = 'vortex.out'
def lgred(self, isa, os):
self.args = [ '%s.raw' % self.endian ]
self.output = 'vortex.out'
class vortex1(vortex):
def ref(self, isa, os):
self.args = [ '%s1.raw' % self.endian ]
self.output = 'vortex1.out'
self.simpoint = 271*100E6
class vortex2(vortex):
def ref(self, isa, os):
self.simpoint = 1024*100E6
self.args = [ '%s2.raw' % self.endian ]
self.output = 'vortex2.out'
class vortex3(vortex):
def ref(self, isa, os):
self.simpoint = 564*100E6
self.args = [ '%s3.raw' % self.endian ]
self.output = 'vortex3.out'
class vpr(MinneDefaultBenchmark):
name = 'vpr'
number = 175
lang = 'C'
# not sure about vpr minnespec place.in
class vpr_place(vpr):
args = [ 'net.in', 'arch.in', 'place.out', 'dum.out', '-nodisp',
'-place_only', '-init_t', '5', '-exit_t', '0.005',
'-alpha_t', '0.9412', '-inner_num', '2' ]
output = 'place_log.out'
class vpr_route(vpr):
simpoint = 476*100E6
args = [ 'net.in', 'arch.in', 'place.in', 'route.out', '-nodisp',
'-route_only', '-route_chan_width', '15',
'-pres_fac_mult', '2', '-acc_fac', '1',
'-first_iter_pres_fac', '4', '-initial_pres_fac', '8' ]
output = 'route_log.out'
all = [ ammp, applu, apsi, art, art110, art470, equake, facerec, fma3d, galgel,
lucas, mesa, mgrid, sixtrack, swim, wupwise, bzip2_source,
bzip2_graphic, bzip2_program, crafty, eon_kajiya, eon_cook,
eon_rushmeier, gap, gcc_166, gcc_200, gcc_expr, gcc_integrate,
gcc_scilab, gzip_source, gzip_log, gzip_graphic, gzip_random,
gzip_program, mcf, parser, perlbmk_diffmail, perlbmk_makerand,
perlbmk_perfect, perlbmk_splitmail1, perlbmk_splitmail2,
perlbmk_splitmail3, perlbmk_splitmail4, twolf, vortex1, vortex2,
vortex3, vpr_place, vpr_route ]
__all__ = [ x.__name__ for x in all ]
if __name__ == '__main__':
from pprint import pprint
for bench in all:
for input_set in 'ref', 'test', 'train':
print 'class: %s' % bench.__name__
x = bench('alpha', 'tru64', input_set)
print '%s: %s' % (x, input_set)
pprint(x.makeLiveProcessArgs())
print
| bsd-3-clause |
connexio/cypb | pb2.4/google/protobuf/service_reflection.py | 601 | 11010 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains metaclasses used to create protocol service and service stub
classes from ServiceDescriptor objects at runtime.
The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to
inject all useful functionality into the classes output by the protocol
compiler at compile-time.
"""
__author__ = '[email protected] (Petar Petrov)'
class GeneratedServiceType(type):
"""Metaclass for service classes created at runtime from ServiceDescriptors.
Implementations for all methods described in the Service class are added here
by this class. We also create properties to allow getting/setting all fields
in the protocol message.
The protocol compiler currently uses this metaclass to create protocol service
classes at runtime. Clients can also manually create their own classes at
runtime, as in this example:
mydescriptor = ServiceDescriptor(.....)
class MyProtoService(service.Service):
__metaclass__ = GeneratedServiceType
DESCRIPTOR = mydescriptor
myservice_instance = MyProtoService()
...
"""
_DESCRIPTOR_KEY = 'DESCRIPTOR'
def __init__(cls, name, bases, dictionary):
"""Creates a message service class.
Args:
name: Name of the class (ignored, but required by the metaclass
protocol).
bases: Base classes of the class being constructed.
dictionary: The class dictionary of the class being constructed.
dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
describing this protocol service type.
"""
# Don't do anything if this class doesn't have a descriptor. This happens
# when a service class is subclassed.
if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary:
return
descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY]
service_builder = _ServiceBuilder(descriptor)
service_builder.BuildService(cls)
class GeneratedServiceStubType(GeneratedServiceType):
"""Metaclass for service stubs created at runtime from ServiceDescriptors.
This class has similar responsibilities as GeneratedServiceType, except that
it creates the service stub classes.
"""
_DESCRIPTOR_KEY = 'DESCRIPTOR'
def __init__(cls, name, bases, dictionary):
"""Creates a message service stub class.
Args:
name: Name of the class (ignored, but required by the metaclass protocol).
bases: Base classes of the class being constructed.
dictionary: The class dictionary of the class being constructed.
dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
describing this protocol service type.
"""
super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary)
# Don't do anything if this class doesn't have a descriptor. This happens
# when a service stub is subclassed.
if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary:
return
descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY]
service_stub_builder = _ServiceStubBuilder(descriptor)
service_stub_builder.BuildServiceStub(cls)
class _ServiceBuilder(object):
"""This class constructs a protocol service class using a service descriptor.
Given a service descriptor, this class constructs a class that represents
the specified service descriptor. One service builder instance constructs
exactly one service class. That means all instances of that class share the
same builder.
"""
def __init__(self, service_descriptor):
"""Initializes an instance of the service class builder.
Args:
service_descriptor: ServiceDescriptor to use when constructing the
service class.
"""
self.descriptor = service_descriptor
def BuildService(self, cls):
"""Constructs the service class.
Args:
cls: The class that will be constructed.
"""
# CallMethod needs to operate with an instance of the Service class. This
# internal wrapper function exists only to be able to pass the service
# instance to the method that does the real CallMethod work.
def _WrapCallMethod(srvc, method_descriptor,
rpc_controller, request, callback):
return self._CallMethod(srvc, method_descriptor,
rpc_controller, request, callback)
self.cls = cls
cls.CallMethod = _WrapCallMethod
cls.GetDescriptor = staticmethod(lambda: self.descriptor)
cls.GetDescriptor.__doc__ = "Returns the service descriptor."
cls.GetRequestClass = self._GetRequestClass
cls.GetResponseClass = self._GetResponseClass
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateNonImplementedMethod(method))
def _CallMethod(self, srvc, method_descriptor,
rpc_controller, request, callback):
"""Calls the method described by a given method descriptor.
Args:
srvc: Instance of the service for which this method is called.
method_descriptor: Descriptor that represent the method to call.
rpc_controller: RPC controller to use for this method's execution.
request: Request protocol message.
callback: A callback to invoke after the method has completed.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'CallMethod() given method descriptor for wrong service type.')
method = getattr(srvc, method_descriptor.name)
return method(rpc_controller, request, callback)
def _GetRequestClass(self, method_descriptor):
"""Returns the class of the request protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
request protocol message class.
Returns:
A class that represents the input protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetRequestClass() given method descriptor for wrong service type.')
return method_descriptor.input_type._concrete_class
def _GetResponseClass(self, method_descriptor):
"""Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetResponseClass() given method descriptor for wrong service type.')
return method_descriptor.output_type._concrete_class
def _GenerateNonImplementedMethod(self, method):
"""Generates and returns a method that can be set for a service methods.
Args:
method: Descriptor of the service method for which a method is to be
generated.
Returns:
A method that can be added to the service class.
"""
return lambda inst, rpc_controller, request, callback: (
self._NonImplementedMethod(method.name, rpc_controller, callback))
def _NonImplementedMethod(self, method_name, rpc_controller, callback):
"""The body of all methods in the generated service class.
Args:
method_name: Name of the method being executed.
rpc_controller: RPC controller used to execute this method.
callback: A callback which will be invoked when the method finishes.
"""
rpc_controller.SetFailed('Method %s not implemented.' % method_name)
callback(None)
class _ServiceStubBuilder(object):
"""Constructs a protocol service stub class using a service descriptor.
Given a service descriptor, this class constructs a suitable stub class.
A stub is just a type-safe wrapper around an RpcChannel which emulates a
local implementation of the service.
One service stub builder instance constructs exactly one class. It means all
instances of that class share the same service stub builder.
"""
def __init__(self, service_descriptor):
"""Initializes an instance of the service stub class builder.
Args:
service_descriptor: ServiceDescriptor to use when constructing the
stub class.
"""
self.descriptor = service_descriptor
def BuildServiceStub(self, cls):
"""Constructs the stub class.
Args:
cls: The class that will be constructed.
"""
def _ServiceStubInit(stub, rpc_channel):
stub.rpc_channel = rpc_channel
self.cls = cls
cls.__init__ = _ServiceStubInit
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateStubMethod(method))
def _GenerateStubMethod(self, method):
return (lambda inst, rpc_controller, request, callback=None:
self._StubMethod(inst, method, rpc_controller, request, callback))
def _StubMethod(self, stub, method_descriptor,
rpc_controller, request, callback):
"""The body of all service methods in the generated stub class.
Args:
stub: Stub instance.
method_descriptor: Descriptor of the invoked method.
rpc_controller: Rpc controller to execute the method.
request: Request protocol message.
callback: A callback to execute when the method finishes.
Returns:
Response message (in case of blocking call).
"""
return stub.rpc_channel.CallMethod(
method_descriptor, rpc_controller, request,
method_descriptor.output_type._concrete_class, callback)
| bsd-3-clause |
afrigeo/google-visualization-python | examples/dynamic_example.py | 13 | 1525 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of dynamic use of Google Visualization Python API."""
__author__ = "Misha Seltzer"
import gviz_api
description = {"name": ("string", "Name"),
"salary": ("number", "Salary"),
"full_time": ("boolean", "Full Time Employee")}
data = [{"name": "Mike", "salary": (10000, "$10,000"), "full_time": True},
{"name": "Jim", "salary": (800, "$800"), "full_time": False},
{"name": "Alice", "salary": (12500, "$12,500"), "full_time": True},
{"name": "Bob", "salary": (7000, "$7,000"), "full_time": True}]
data_table = gviz_api.DataTable(description)
data_table.LoadData(data)
print "Content-type: text/plain"
print
print data_table.ToJSonResponse(columns_order=("name", "salary", "full_time"),
order_by="salary")
# Put the url (http://google-visualization.appspot.com/python/dynamic_example)
# as your Google Visualization data source.
| apache-2.0 |
aroberge/docpicture | examples/uml_sequence.py | 1 | 4381 | """
Just an empty module used as a test for uml sequence diagrams.
The following include some examples taken from the original website
http://www.websequencediagrams.com
and adapted for our purpose by including the docpicture directive.
Aside: just for a quick test, we include another type of object.
..docpicture:: uml_sequence
Alice->Bob: Authentication Request
Bob-->Alice: Authentication Response
..docpicture:: uml_sequence:rose
Alice->Bob: Authentication Request
Bob-->Alice: Authentication Response
..docpicture:: uml_sequence:napkin
Alice->Bob: Authentication Request
Bob-->Alice: Authentication Response
..docpicture:: uml_sequence:modern-blue
Alice->Bob: Authentication Request
Bob-->Alice: Authentication Response
Draw a signal from one participant to another like this:
..docpicture:: uml_sequence
Alice->Bob: Authentication Request
Bob-->Alice: Authentication Response
The participants are automatically created when they are used.
Use the "-->" syntax to draw a dotted line.
If you want the participants to be shown in a different order
than they are used, declare them first using the participant keyword.
You can also rename them this way to save typing.
..docpicture:: uml_sequence:modern-blue
participant Bob
participant Alice
participant "I have a really\\nlong name" as L
Alice->Bob: Authentication Request
Bob->Alice: Authentication Response
Bob->L: Log transaction
Signal to Self
A participant can send a signal to itself.
This will result in an arrow that turns back on itself.
You may break the text into multiple lines by using "\\n".
..docpicture:: uml_sequence:omegapple
Alice->Alice: This is a signal to self.\\nIt also demonstrates \\nmultiline \\ntext.
Grouping signals together
You can group signals together using the alt/else, opt,
and loop keywords. All of them can take a text description that will
be displayed in the group header.
Use the end keyword to signal the end of a group.
The groups may be nested to any depth.
..docpicture:: uml_sequence:earth
Alice->Bob: Authentication Request
alt successful case
Bob->Alice: Authentication Accepted
else some kind of failure
Bob->Alice: Authentication Failure
opt
loop 1000 times
Alice->Bob: DNS Attack
end
end
else Another type of failure
Bob->Alice: Please repeat
end
Notes in the diagram
You can add notes to your diagram.
Notes can be placed to the left of a participant or to the
right of a participant. In addition, you can centre a note over
one or more participants.
If a note contains more than one line, it will not be word-wrapped.
Instead, it will be formatted exactly as written.
..docpicture:: uml_sequence:rose
participant Alice
participant Bob
note left of Alice
This is displayed
left of Alice.
end note
note right of Alice: This is displayed right of Alice.
note over Alice: This is displayed over Alice.
note over Alice, Bob: This is displayed over Bob and Alice.
Lifeline Activation and Destruction
Use the activate and deactivate keywords to denote object activation.
While activated, the participant's lifeline will be highlighted.
The activate/deactivate keywords will apply to the previous signal.
You can use the destroy keyword to destroy a participant.
The participant's lifeline will end at the previous signal.
..docpicture:: uml_sequence:mscgen
User->A: DoWork
activate A
A->B: <<createRequest>>
activate B
B->C: DoWork
activate C
C-->B: WorkDone
destroy C
B-->A: RequestCreated
deactivate B
A->User: Done
Finally, we include a simple case reproduced in all available styles.
This, together with the previous examples, is also a test to verify
that we can embed more than one diagram of a given style.
..docpicture:: uml_sequence
A->B: testing
..docpicture:: uml_sequence:earth
A->B: testing
..docpicture:: uml_sequence:modern-blue
A->B: testing
..docpicture:: uml_sequence:mscgen
A->B: testing
..docpicture:: uml_sequence:omegapple
A->B: testing
..docpicture:: uml_sequence:qsd
A->B: testing
..docpicture:: uml_sequence:rose
A->B: testing
..docpicture:: uml_sequence:roundgreen
A->B: testing
..docpicture:: uml_sequence:napkin
A->B: testing
"""
pass | bsd-3-clause |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/threading.py | 31 | 32474 | """Thread module emulating a subset of Java's threading model."""
import sys as _sys
try:
import thread
except ImportError:
del _sys.modules[__name__]
raise
import warnings
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
# Note regarding PEP 8 compliant aliases
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. While those names are not in any imminent danger of being
# deprecated, starting with Python 2.6, the module now provides a
# PEP 8 compliant alias for any such method name.
# Using the new PEP 8 compliant names also facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'active_count', 'Condition', 'currentThread',
'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread
# sys.exc_clear is used to work around the fact that except blocks
# don't fully clear the exception until 3.0.
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='threading', message='sys.exc_clear')
# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose. We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).
_VERBOSE = False
if __debug__:
class _Verbose(object):
def __init__(self, verbose=None):
if verbose is None:
verbose = _VERBOSE
self.__verbose = verbose
def _note(self, format, *args):
if self.__verbose:
format = format % args
# Issue #4188: calling current_thread() can incur an infinite
# recursion if it has to create a DummyThread on the fly.
ident = _get_ident()
try:
name = _active[ident].name
except KeyError:
name = "<OS thread %d>" % ident
format = "%s: %s\n" % (name, format)
_sys.stderr.write(format)
else:
# Disable this when using "python -O"
class _Verbose(object):
def __init__(self, verbose=None):
pass
def _note(self, *args):
pass
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
global _profile_hook
_profile_hook = func
def settrace(func):
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
return _RLock(*args, **kwargs)
class _RLock(_Verbose):
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__block = _allocate_lock()
self.__owner = None
self.__count = 0
def __repr__(self):
owner = self.__owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self.__count)
def acquire(self, blocking=1):
me = _get_ident()
if self.__owner == me:
self.__count = self.__count + 1
if __debug__:
self._note("%s.acquire(%s): recursive success", self, blocking)
return 1
rc = self.__block.acquire(blocking)
if rc:
self.__owner = me
self.__count = 1
if __debug__:
self._note("%s.acquire(%s): initial success", self, blocking)
else:
if __debug__:
self._note("%s.acquire(%s): failure", self, blocking)
return rc
__enter__ = acquire
def release(self):
if self.__owner != _get_ident():
raise RuntimeError("cannot release un-acquired lock")
self.__count = count = self.__count - 1
if not count:
self.__owner = None
self.__block.release()
if __debug__:
self._note("%s.release(): final release", self)
else:
if __debug__:
self._note("%s.release(): non-final release", self)
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, count_owner):
count, owner = count_owner
self.__block.acquire()
self.__count = count
self.__owner = owner
if __debug__:
self._note("%s._acquire_restore()", self)
def _release_save(self):
if __debug__:
self._note("%s._release_save()", self)
count = self.__count
self.__count = 0
owner = self.__owner
self.__owner = None
self.__block.release()
return (count, owner)
def _is_owned(self):
return self.__owner == _get_ident()
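# Editor's note (not part of the stdlib source): the point of the reentrant
# lock is that the owning thread may acquire it again without deadlocking:
#
#     lock = threading.RLock()
#     with lock:            # count goes to 1
#         with lock:        # same owner, count goes to 2
#             pass          # inner exit -> count 1; outer exit -> count 0, freed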
def Condition(*args, **kwargs):
return _Condition(*args, **kwargs)
class _Condition(_Verbose):
def __init__(self, lock=None, verbose=None):
_Verbose.__init__(self, verbose)
if lock is None:
lock = RLock()
self.__lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self.__waiters = []
def __enter__(self):
return self.__lock.__enter__()
def __exit__(self, *args):
return self.__lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
def _release_save(self):
self.__lock.release() # No state to save
def _acquire_restore(self, x):
self.__lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self.__lock.acquire(0):
self.__lock.release()
return False
else:
return True
def wait(self, timeout=None):
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self.__waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
if __debug__:
self._note("%s.wait(): got it", self)
else:
# Balancing act: We can't afford a pure busy loop, so we
# have to sleep; but if we sleep the whole timeout time,
# we'll be unresponsive. The scheme here sleeps very
# little at first, longer as time goes on, but never longer
# than 20 times per second (or the timeout time remaining).
endtime = _time() + timeout
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
gotit = waiter.acquire(0)
if gotit:
break
remaining = endtime - _time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, .05)
_sleep(delay)
if not gotit:
if __debug__:
self._note("%s.wait(%s): timed out", self, timeout)
try:
self.__waiters.remove(waiter)
except ValueError:
pass
else:
if __debug__:
self._note("%s.wait(%s): got it", self, timeout)
finally:
self._acquire_restore(saved_state)
def notify(self, n=1):
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self.__waiters
waiters = __waiters[:n]
if not waiters:
if __debug__:
self._note("%s.notify(): no waiters", self)
return
self._note("%s.notify(): notifying %d waiter%s", self, n,
n!=1 and "s" or "")
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notifyAll(self):
self.notify(len(self.__waiters))
notify_all = notifyAll
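# Illustrative sketch (added for clarity; not part of the original module): the
# canonical consumer-side wait/notify pattern around a Condition. The queue and
# condition objects are hypothetical and supplied by the caller.
def _example_condition_wait(shared_queue, cond):
    # Block until a producer appends an item and calls cond.notify().
    cond.acquire()
    try:
        while not shared_queue:
            cond.wait()
        return shared_queue.pop(0)
    finally:
        cond.release()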
def Semaphore(*args, **kwargs):
return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1, verbose=None):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__value = value
def acquire(self, blocking=1):
rc = False
self.__cond.acquire()
while self.__value == 0:
if not blocking:
break
if __debug__:
self._note("%s.acquire(%s): blocked waiting, value=%s",
self, blocking, self.__value)
self.__cond.wait()
else:
self.__value = self.__value - 1
if __debug__:
self._note("%s.acquire: success, value=%s",
self, self.__value)
rc = True
self.__cond.release()
return rc
__enter__ = acquire
def release(self):
self.__cond.acquire()
self.__value = self.__value + 1
if __debug__:
self._note("%s.release: success, value=%s",
self, self.__value)
self.__cond.notify()
self.__cond.release()
def __exit__(self, t, v, tb):
self.release()
def BoundedSemaphore(*args, **kwargs):
return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
"""Semaphore that checks that # releases is <= # acquires"""
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def release(self):
if self._Semaphore__value >= self._initial_value:
raise ValueError, "Semaphore released too many times"
return _Semaphore.release(self)
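# Illustrative sketch (added for clarity; not part of the original module): a
# BoundedSemaphore guards a fixed-size pool; the pool size below is hypothetical.
def _example_bounded_semaphore():
    pool = BoundedSemaphore(2)   # at most two concurrent holders
    pool.acquire()
    try:
        pass                     # ... use the limited resource here ...
    finally:
        pool.release()           # releasing more times than acquired raises ValueError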
def Event(*args, **kwargs):
return _Event(*args, **kwargs)
class _Event(_Verbose):
# After Tim Peters' event class (without is_posted())
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self.__cond.__init__()
def isSet(self):
return self.__flag
is_set = isSet
def set(self):
self.__cond.acquire()
try:
self.__flag = True
self.__cond.notify_all()
finally:
self.__cond.release()
def clear(self):
self.__cond.acquire()
try:
self.__flag = False
finally:
self.__cond.release()
def wait(self, timeout=None):
self.__cond.acquire()
try:
if not self.__flag:
self.__cond.wait(timeout)
return self.__flag
finally:
self.__cond.release()
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# Main class for threads
class Thread(_Verbose):
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
__exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
assert group is None, "group argument must be None for now"
_Verbose.__init__(self, verbose)
if kwargs is None:
kwargs = {}
self.__target = target
self.__name = str(name or _newname())
self.__args = args
self.__kwargs = kwargs
self.__daemonic = self._set_daemon()
self.__ident = None
self.__started = Event()
self.__stopped = False
self.__block = Condition(Lock())
self.__initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self.__stderr = _sys.stderr
def _reset_internal_locks(self):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
if hasattr(self, '_Thread__block'): # DummyThread deletes self.__block
self.__block.__init__()
self.__started._reset_internal_locks()
@property
def _block(self):
# used by a unittest
return self.__block
def _set_daemon(self):
# Overridden in _MainThread and _DummyThread
return current_thread().daemon
def __repr__(self):
assert self.__initialized, "Thread.__init__() was not called"
status = "initial"
if self.__started.is_set():
status = "started"
if self.__stopped:
status = "stopped"
if self.__daemonic:
status += " daemon"
if self.__ident is not None:
status += " %s" % self.__ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
def start(self):
if not self.__initialized:
raise RuntimeError("thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("threads can only be started once")
if __debug__:
self._note("%s.start(): starting thread", self)
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self.__bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self.__started.wait()
def run(self):
try:
if self.__target:
self.__target(*self.__args, **self.__kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self.__target, self.__args, self.__kwargs
def __bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# __bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# __bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self.__bootstrap_inner()
except:
if self.__daemonic and _sys is None:
return
raise
def _set_ident(self):
self.__ident = _get_ident()
def __bootstrap_inner(self):
try:
self._set_ident()
self.__started.set()
with _active_limbo_lock:
_active[self.__ident] = self
del _limbo[self]
if __debug__:
self._note("%s.__bootstrap(): thread started", self)
if _trace_hook:
self._note("%s.__bootstrap(): registering trace hook", self)
_sys.settrace(_trace_hook)
if _profile_hook:
self._note("%s.__bootstrap(): registering profile hook", self)
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
if __debug__:
self._note("%s.__bootstrap(): raised SystemExit", self)
except:
if __debug__:
self._note("%s.__bootstrap(): unhandled exception", self)
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self.__stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self.__exc_info()
try:
print>>self.__stderr, (
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):")
print>>self.__stderr, (
"Traceback (most recent call last):")
while exc_tb:
print>>self.__stderr, (
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name))
exc_tb = exc_tb.tb_next
print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
else:
if __debug__:
self._note("%s.__bootstrap(): normal return", self)
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
self.__exc_clear()
finally:
with _active_limbo_lock:
self.__stop()
try:
# We don't call self.__delete() because it also
# grabs _active_limbo_lock.
del _active[_get_ident()]
except:
pass
def __stop(self):
self.__block.acquire()
self.__stopped = True
self.__block.notify_all()
self.__block.release()
def __delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with dummy_thread:
#
# Must take care to not raise an exception if dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). dummy_thread.get_ident() always returns -1 since
# there is only one thread if dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[_get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if not self.__started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if __debug__:
if not self.__stopped:
self._note("%s.join(): waiting until thread stops", self)
self.__block.acquire()
try:
if timeout is None:
while not self.__stopped:
self.__block.wait()
if __debug__:
self._note("%s.join(): thread stopped", self)
else:
deadline = _time() + timeout
while not self.__stopped:
delay = deadline - _time()
if delay <= 0:
if __debug__:
self._note("%s.join(): timed out", self)
break
self.__block.wait(delay)
else:
if __debug__:
self._note("%s.join(): thread stopped", self)
finally:
self.__block.release()
@property
def name(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__name
@name.setter
def name(self, name):
assert self.__initialized, "Thread.__init__() not called"
self.__name = str(name)
@property
def ident(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__ident
def isAlive(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__started.is_set() and not self.__stopped
is_alive = isAlive
@property
def daemon(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__daemonic
@daemon.setter
def daemon(self, daemonic):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self.__daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
return _Timer(*args, **kwargs)
class _Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=[], kwargs={}):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet"""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread")
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return False
def _exitfunc(self):
self._Thread__stop()
t = _pickSomeNonDaemonThread()
if t:
if __debug__:
self._note("%s: waiting for other threads", self)
while t:
t.join()
t = _pickSomeNonDaemonThread()
if __debug__:
self._note("%s: exiting", self)
self._Thread__delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"))
# Thread.__block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._Thread__block
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def currentThread():
try:
return _active[_get_ident()]
except KeyError:
##print "current_thread(): no current thread for", _get_ident()
return _DummyThread()
current_thread = currentThread
def activeCount():
with _active_limbo_lock:
return len(_active) + len(_limbo)
active_count = activeCount
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return _active.values() + _limbo.values()
def enumerate():
with _active_limbo_lock:
return _active.values() + _limbo.values()
from thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _active.itervalues():
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
if hasattr(thread, '_reset_internal_locks'):
thread._reset_internal_locks()
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = _get_ident()
thread._Thread__ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._Thread__stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
# Self-test code
def _test():
class BoundedQueue(_Verbose):
def __init__(self, limit):
_Verbose.__init__(self)
self.mon = RLock()
self.rc = Condition(self.mon)
self.wc = Condition(self.mon)
self.limit = limit
self.queue = deque()
def put(self, item):
self.mon.acquire()
while len(self.queue) >= self.limit:
self._note("put(%s): queue full", item)
self.wc.wait()
self.queue.append(item)
self._note("put(%s): appended, length now %d",
item, len(self.queue))
self.rc.notify()
self.mon.release()
def get(self):
self.mon.acquire()
while not self.queue:
self._note("get(): queue empty")
self.rc.wait()
item = self.queue.popleft()
self._note("get(): got %s, %d left", item, len(self.queue))
self.wc.notify()
self.mon.release()
return item
class ProducerThread(Thread):
def __init__(self, queue, quota):
Thread.__init__(self, name="Producer")
self.queue = queue
self.quota = quota
def run(self):
from random import random
counter = 0
while counter < self.quota:
counter = counter + 1
self.queue.put("%s.%d" % (self.name, counter))
_sleep(random() * 0.00001)
class ConsumerThread(Thread):
def __init__(self, queue, count):
Thread.__init__(self, name="Consumer")
self.queue = queue
self.count = count
def run(self):
while self.count > 0:
item = self.queue.get()
print item
self.count = self.count - 1
NP = 3
QL = 4
NI = 5
Q = BoundedQueue(QL)
P = []
for i in range(NP):
t = ProducerThread(Q, NI)
t.name = ("Producer-%d" % (i+1))
P.append(t)
C = ConsumerThread(Q, NI*NP)
for t in P:
t.start()
_sleep(0.000001)
C.start()
for t in P:
t.join()
C.join()
if __name__ == '__main__':
_test()
| gpl-2.0 |
Runscope/pysaml2 | src/saml2/ecp_client.py | 1 | 11162 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
Contains a class that can do SAML ECP Authentication for other python
programs.
"""
import cookielib
import logging
from saml2 import soap
from saml2 import saml
from saml2 import samlp
from saml2 import SAMLError
from saml2 import BINDING_SOAP
from saml2.client_base import MIME_PAOS
from saml2.config import Config
from saml2.entity import Entity
from saml2.httpbase import set_list2dict, dict2set_list
from saml2.profile import paos
from saml2.profile import ecp
from saml2.mdstore import MetadataStore
from saml2.s_utils import BadRequest
SERVICE = "urn:oasis:names:tc:SAML:2.0:profiles:SSO:ecp"
PAOS_HEADER_INFO = 'ver="%s";"%s"' % (paos.NAMESPACE, SERVICE)
logger = logging.getLogger(__name__)
class Client(Entity):
def __init__(self, user, passwd, sp="", idp=None, metadata_file=None,
xmlsec_binary=None, verbose=0, ca_certs="",
disable_ssl_certificate_validation=True, key_file=None,
cert_file=None, config=None):
"""
:param user: user name
:param passwd: user password
:param sp: The SP URL
:param idp: The IdP PAOS endpoint
:param metadata_file: Where the metadata file is if used
:param xmlsec_binary: Where the xmlsec1 binary can be found (*)
:param verbose: Chatty or not
:param ca_certs: is the path of a file containing root CA certificates
for SSL server certificate validation (*)
:param disable_ssl_certificate_validation: If
disable_ssl_certificate_validation is true, SSL cert validation
will not be performed (*)
:param key_file: Private key filename (*)
:param cert_file: Certificate filename (*)
:param config: Config() instance, overrides all the parameters marked
with an asterisk (*) above
"""
if not config:
config = Config()
config.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
config.key_file = key_file
config.cert_file = cert_file
config.ca_certs = ca_certs
config.xmlsec_binary = xmlsec_binary
Entity.__init__(self, "sp", config)
self._idp = idp
self._sp = sp
self.user = user
self.passwd = passwd
self._verbose = verbose
if metadata_file:
self._metadata = MetadataStore([saml, samlp], None, config)
self._metadata.load("local", metadata_file)
logger.debug("Loaded metadata from '%s'" % metadata_file)
else:
self._metadata = None
self.metadata = self._metadata
self.cookie_handler = None
self.done_ecp = False
self.cookie_jar = cookielib.LWPCookieJar()
def phase2(self, authn_request, rc_url, idp_entity_id, headers=None,
sign=False, **kwargs):
"""
        Performs the second phase of the ECP conversation: the exchange
        with the IdP.
:param authn_request: The AuthenticationRequest
:param rc_url: The assertion consumer service url of the SP
:param idp_entity_id: The EntityID of the IdP
:param headers: Possible extra headers
:param sign: If the message should be signed
:return: The response from the IdP
"""
_, destination = self.pick_binding("single_sign_on_service",
[BINDING_SOAP], "idpsso",
entity_id=idp_entity_id)
ht_args = self.apply_binding(BINDING_SOAP, authn_request, destination,
sign=sign)
if headers:
ht_args["headers"].extend(headers)
logger.debug("[P2] Sending request: %s" % ht_args["data"])
# POST the request to the IdP
response = self.send(**ht_args)
logger.debug("[P2] Got IdP response: %s" % response)
if response.status_code != 200:
raise SAMLError(
"Request to IdP failed (%s): %s" % (response.status_code,
response.error))
# SAMLP response in a SOAP envelope body, ecp response in headers
respdict = self.parse_soap_message(response.text)
if respdict is None:
raise SAMLError("Unexpected reply from the IdP")
logger.debug("[P2] IdP response dict: %s" % respdict)
idp_response = respdict["body"]
assert idp_response.c_tag == "Response"
logger.debug("[P2] IdP AUTHN response: %s" % idp_response)
_ecp_response = None
for item in respdict["header"]:
if item.c_tag == "Response" and item.c_namespace == ecp.NAMESPACE:
_ecp_response = item
_acs_url = _ecp_response.assertion_consumer_service_url
if rc_url != _acs_url:
error = ("response_consumer_url '%s' does not match" % rc_url,
"assertion_consumer_service_url '%s" % _acs_url)
# Send an error message to the SP
_ = self.send(rc_url, "POST", data=soap.soap_fault(error))
# Raise an exception so the user knows something went wrong
raise SAMLError(error)
return idp_response
@staticmethod
def parse_sp_ecp_response(respdict):
if respdict is None:
raise SAMLError("Unexpected reply from the SP")
logger.debug("[P1] SP response dict: %s" % respdict)
# AuthnRequest in the body or not
authn_request = respdict["body"]
assert authn_request.c_tag == "AuthnRequest"
# ecp.RelayState among headers
_relay_state = None
_paos_request = None
for item in respdict["header"]:
if item.c_tag == "RelayState" and item.c_namespace == ecp.NAMESPACE:
_relay_state = item
if item.c_tag == "Request" and item.c_namespace == paos.NAMESPACE:
_paos_request = item
if _paos_request is None:
raise BadRequest("Missing request")
_rc_url = _paos_request.response_consumer_url
return {"authn_request": authn_request, "rc_url": _rc_url,
"relay_state": _relay_state}
def ecp_conversation(self, respdict, idp_entity_id=None):
"""
:param respdict:
:param idp_entity_id:
:return:
"""
args = self.parse_sp_ecp_response(respdict)
# **********************
# Phase 2 - talk to the IdP
# **********************
idp_response = self.phase2(idp_entity_id=idp_entity_id, **args)
# **********************************
# Phase 3 - back to the SP
# **********************************
ht_args = self.use_soap(idp_response, args["rc_url"],
[args["relay_state"]])
logger.debug("[P3] Post to SP: %s" % ht_args["data"])
ht_args["headers"].append(('Content-Type', 'application/vnd.paos+xml'))
# POST the package from the IdP to the SP
response = self.send(args["rc_url"], "POST", **ht_args)
if response.status_code == 302:
# ignore where the SP is redirecting us to and go for the
# url I started off with.
pass
else:
print response.error
raise SAMLError(
"Error POSTing package to SP: %s" % response.error)
logger.debug("[P3] SP response: %s" % response.text)
self.done_ecp = True
logger.debug("Done ECP")
return None
def add_paos_headers(self, headers=None):
if headers:
headers = set_list2dict(headers)
headers["PAOS"] = PAOS_HEADER_INFO
if "Accept" in headers:
headers["Accept"] += ";%s" % MIME_PAOS
elif "accept" in headers:
headers["Accept"] = headers["accept"]
headers["Accept"] += ";%s" % MIME_PAOS
del headers["accept"]
headers = dict2set_list(headers)
else:
headers = [
('Accept', 'text/html; %s' % MIME_PAOS),
('PAOS', PAOS_HEADER_INFO)
]
return headers
def operation(self, url, idp_entity_id, op, **opargs):
"""
This is the method that should be used by someone that wants
to authenticate using SAML ECP
:param url: The page that access is sought for
:param idp_entity_id: The entity ID of the IdP that should be
used for authentication
:param op: Which HTTP operation (GET/POST/PUT/DELETE)
:param opargs: Arguments to the HTTP call
:return: The page
"""
if url not in opargs:
url = self._sp
# ********************************************
# Phase 1 - First conversation with the SP
# ********************************************
# headers needed to indicate to the SP that I'm ECP enabled
opargs["headers"] = self.add_paos_headers(opargs["headers"])
response = self.send(url, op, **opargs)
logger.debug("[Op] SP response: %s" % response)
if response.status_code != 200:
raise SAMLError(
"Request to SP failed: %s" % response.error)
# The response might be a AuthnRequest instance in a SOAP envelope
# body. If so it's the start of the ECP conversation
# Two SOAP header blocks; paos:Request and ecp:Request
# may also contain a ecp:RelayState SOAP header block
# If channel-binding was part of the PAOS header any number of
# <cb:ChannelBindings> header blocks may also be present
# if 'holder-of-key' option then one or more <ecp:SubjectConfirmation>
# header blocks may also be present
try:
respdict = self.parse_soap_message(response.text)
self.ecp_conversation(respdict, idp_entity_id)
# should by now be authenticated so this should go smoothly
response = self.send(url, op, **opargs)
except (soap.XmlParseError, AssertionError, KeyError):
pass
#print "RESP",response, self.http.response
if response.status_code != 404:
raise SAMLError("Error performing operation: %s" % (
response.error,))
return response
# different HTTP operations
def delete(self, url=None, idp_entity_id=None):
return self.operation(url, idp_entity_id, "DELETE")
def get(self, url=None, idp_entity_id=None, headers=None):
return self.operation(url, idp_entity_id, "GET", headers=headers)
def post(self, url=None, data="", idp_entity_id=None, headers=None):
return self.operation(url, idp_entity_id, "POST", data=data,
headers=headers)
def put(self, url=None, data="", idp_entity_id=None, headers=None):
return self.operation(url, idp_entity_id, "PUT", data=data,
headers=headers)
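# Illustrative sketch (added for clarity; not part of the original module): a
# minimal ECP round trip. The credentials, SP URL, metadata file and IdP entity
# id below are hypothetical placeholders.
def _example_ecp_usage():
    client = Client("alice", "secret",
                    sp="https://sp.example.org/protected",
                    metadata_file="metadata.xml")
    # GET the protected page; the three ECP phases (SP -> IdP -> SP) run inside.
    return client.get(idp_entity_id="https://idp.example.org/idp.xml")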
| bsd-2-clause |
proxysh/Safejumper-for-Mac | buildlinux/env64/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/url.py | 713 | 5879 | from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
class Url(namedtuple('Url', url_attrs)):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
slots = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
if path and not path.startswith('/'):
path = '/' + path
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:[email protected]:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx + 1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
# on CPython.
if not url:
# Empty
return Url()
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError(url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
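# Illustrative sketch (added for clarity; not part of the original module):
# because Url is a namedtuple, a parsed URL can be modified immutably with
# _replace() and re-serialized via .url. The example URL is hypothetical.
def _example_url_rewrite():
    u = parse_url('http://example.com/base?x=1')
    # -> 'https://example.com:8443/base?x=1'
    return u._replace(scheme='https', port=8443).url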
| gpl-2.0 |
JulienPeloton/LaFabrique | generate_dataset.py | 1 | 3629 | import os
import sys
import argparse
import ConfigParser
from LaFabrique import scanning_strategy
from LaFabrique import noise
from LaFabrique import util_CMB
from LaFabrique import communication as comm
try:
from LaFabrique import foreground
except:
print 'PySM not found - no foreground generation possible...'
def addargs(parser):
''' Parse command line arguments '''
parser.add_argument(
'-setup_env', dest='setup_env',
required=True,
help='Configuration file for the environment.')
parser.add_argument(
'-setup_scanning', dest='setup_scanning',
required=False, default=None,
help='Configuration file for the scanning strategy.')
parser.add_argument(
'-setup_instrument', dest='setup_instrument',
required=False, default=None,
help='Configuration file for the instrument.')
parser.add_argument(
'-setup_foregrounds', dest='setup_foregrounds',
required=False, default=None,
help='Configuration file for the foregrounds (PySM).')
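# Illustrative invocation (added for clarity; not part of the original script).
# The .ini file names below are hypothetical examples:
#   python generate_dataset.py -setup_env setup_env.ini \
#       -setup_scanning setup_scanning.ini -setup_instrument setup_instrument.ini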
def grabargs(args_param=None):
''' Parse command line arguments '''
parser = argparse.ArgumentParser(
description='Package to generate simulated CMB datasets.')
addargs(parser)
args = parser.parse_args(args_param)
if comm.rank == 0:
list_of_sims = ''
if args.setup_scanning is not None:
list_of_sims += ' scans '
if args.setup_instrument is not None:
list_of_sims += ' noise '
if args.setup_foregrounds is not None:
list_of_sims += ' foregrounds'
if len(list_of_sims) == 0:
print 'You need to select at least one ini file!\n'
print ' * scans (see setup_scanning.ini)\n'
print ' * instrument (see setup_instrument.ini)\n'
print ' * foregrounds (see setup_foregrounds.ini)\n'
sys.exit()
else:
print 'Simulations of', list_of_sims
Config = ConfigParser.ConfigParser()
Config.read(args.setup_env)
environment = util_CMB.normalise_env_parser(
Config._sections['Environment'])
## Initialise paths
environment.outpath_noise = os.path.join(
environment.out_path, 'noise')
environment.outpath_masks = os.path.join(
environment.out_path, 'masks')
environment.outpath_foregrounds = os.path.join(
environment.out_path, 'foregrounds')
## Create folders if necessary
if comm.rank == 0:
## Create root folder
if not os.path.exists(environment.out_path):
os.makedirs(environment.out_path)
## Create folders for noise and masks
if not os.path.exists(environment.outpath_noise):
os.makedirs(environment.outpath_noise)
if not os.path.exists(environment.outpath_masks):
os.makedirs(environment.outpath_masks)
if not os.path.exists(environment.outpath_foregrounds):
os.makedirs(environment.outpath_foregrounds)
return args, environment
if __name__ == '__main__':
args_param = None
args, environment = grabargs(args_param)
if args.setup_scanning is not None and comm.rank == 0:
scanning_strategy.generate_scans(args.setup_scanning, environment)
comm.barrier()
## Generate noise
if args.setup_instrument is not None:
noise.generate_noise_sims(args.setup_instrument, comm, environment)
comm.barrier()
## Generate foregrounds
if args.setup_foregrounds is not None and comm.rank == 0:
foreground.generate_foregrounds(args.setup_foregrounds, environment)
comm.barrier()
| gpl-3.0 |
ericschultz/baddatelist_django | hetaira/parser/tests.py | 1 | 2486 | from django.test import TestCase
from hetaira.parser import TokenizerError, MessageParser, ParserError
class TestParsing(TestCase):
def test_phone_numbers_request(self):
parser = MessageParser()
possible_numbers = ["9205555555", " 9205555555", "9205555555 ", " (92055555)5-5 "]
results = map(lambda x: parser.parse(x), possible_numbers)
for r in results:
self.assertEqual(type(r).__name__, "Request")
self.assertEqual(r.req_id, "9205555555")
def test_email_address_request(self):
parser = MessageParser()
possible_emails = ["[email protected]", " [email protected] "]
results = map(lambda x: parser.parse(x), possible_emails)
for r in results:
self.assertEqual(type(r).__name__, "Request")
self.assertEqual(r.req_id, "[email protected]")
def test_license_request(self):
parser = MessageParser()
possible_license = ["WI*152510fC", " WI*152510fC "]
results = map(lambda x: parser.parse(x), possible_license)
for r in results:
self.assertEqual(type(r).__name__, "Request")
self.assertEqual(r.req_id, "WI*152510fC")
def test_phone_numbers_response(self):
parser = MessageParser()
possible_reports = ["9205555555 NC DR PO ST PH",
" 9205555555 NC DR PO ST PH", "9205555555 NC DR PO ST PH",
" (92055555)5-5 N-C D(R PO S)T PH"]
results = map(lambda x: parser.parse(x), possible_reports)
for r in results:
self.assertEqual(type(r).__name__, "Report")
self.assertEqual(r.req_id, "9205555555")
self.assertItemsEqual(r.conditions, ["NC", "DR", "PO", "ST", "PH"])
def test_tokenizer_exception(self):
parser = MessageParser()
possible_ids = ["5444444444444444", "WI", "So at ba", "AA"]
for req_id in possible_ids:
with self.assertRaises(TokenizerError) as cm:
parser.parse(req_id)
exception = cm.exception
self.assertEqual(exception.code, TokenizerError.INVALID_TOKEN)
def test_parsing_exception_expected_ID(self):
parser = MessageParser()
possible_ids = ["NP"]
for req_id in possible_ids:
with self.assertRaises(ParserError) as cm:
parser.parse(req_id)
exception = cm.exception
self.assertEqual(exception.code, ParserError.UNEXPECTED_CONDITION)
| agpl-3.0 |
ak110/pytoolkit | pytoolkit/models.py | 1 | 26024 | """Keras model utilities.
Simple wrappers with Horovod support, etc.
Note that some argument defaults and minor behaviors are changed from Keras itself, so beware.
"""
from __future__ import annotations
import functools
import hashlib
import logging
import os
import pathlib
import tempfile
import typing
import numpy as np
import tensorflow as tf
import pytoolkit as tk
# Types for model input/output
ModelIOType = typing.Union[
np.ndarray, typing.List[np.ndarray], typing.Dict[str, np.ndarray]
]
# Type used for predict
OnBatchFnType = typing.Callable[[tf.keras.models.Model, ModelIOType], ModelIOType]
# Types used for compile
OptimizerType = typing.Union[str, tf.keras.optimizers.Optimizer]
LossType = typing.Union[
str, tf.keras.losses.Loss, typing.Callable[[tf.Tensor, tf.Tensor], tf.Tensor]
]
MetricType = typing.Union[
str, tf.keras.metrics.Metric, typing.Callable[[tf.Tensor, tf.Tensor], tf.Tensor]
]
MetricsType = typing.List[MetricType]
logger = logging.getLogger(__name__)
def check(
train_model: tf.keras.models.Model,
pred_model: tf.keras.models.Model,
models_dir: tk.typing.PathLike,
dataset: tk.data.Dataset = None,
train_data_loader: tk.data.DataLoader = None,
pred_data_loader: tk.data.DataLoader = None,
save_mode: str = "hdf5",
):
"""モデルの簡易動作確認用コード。
Args:
train_model: 学習用モデル
pred_model: 推論用モデル
models_dir: 情報の保存先ディレクトリ
dataset: チェック用データ (少数にしておくこと)
train_data_loader: 学習用DataLoader
pred_data_loader: 推論用DataLoader
save_mode: 保存形式 ("hdf5", "saved_model", "onnx", "tflite"のいずれか)
"""
models_dir = pathlib.Path(models_dir)
    # show the summary
tk.models.summary(train_model)
    # output the graph
tk.models.plot(train_model, models_dir / "model.png")
    # check that save/load works (just make sure it does not crash)
with tempfile.TemporaryDirectory() as tmpdir:
save_path = pathlib.Path(tmpdir) / f"model.{save_mode}"
tk.models.save(pred_model, save_path)
pred_model = tk.models.load(save_path)
# train_model.evaluate
if dataset is not None and train_data_loader is not None:
ds, steps = train_data_loader.get_ds(dataset, shuffle=True)
logger.info(f"train_model.evaluate: {ds.element_spec} {steps=}")
values = train_model.evaluate(ds, steps=steps, verbose=1)
if len(train_model.metrics_names) == 1:
evals = {train_model.metrics_names[0]: values}
else:
evals = dict(zip(train_model.metrics_names, values))
logger.info(f"check.evaluate: {tk.evaluations.to_str(evals)}")
# pred_model.predict
if dataset is not None and pred_data_loader is not None:
ds, steps = pred_data_loader.get_ds(dataset)
logger.info(f"pred_model.evaluate: {ds.element_spec} {steps=}")
pred = pred_model.predict(ds, steps=steps, verbose=1)
if isinstance(pred, (list, tuple)):
logger.info(f"check.predict: shape={[p.shape for p in pred]}")
else:
logger.info(f"check.predict: shape={pred.shape}")
# train_model.fit
if dataset is not None and train_data_loader is not None:
ds, steps = train_data_loader.get_ds(dataset, shuffle=True)
train_model.fit(ds, steps_per_epoch=steps, epochs=1, verbose=1)
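# Note (added for clarity): check() is meant to be called once with a handful of
# samples before a long training run, to catch shape/compile/save errors early.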
def load(
path: tk.typing.PathLike,
custom_objects: typing.Dict[str, typing.Any] = None,
compile: bool = False, # pylint: disable=redefined-outer-name
):
"""モデルの読み込み。"""
with tk.log.trace(f"load({path})"):
model = tf.keras.models.load_model(
str(path), custom_objects=custom_objects, compile=compile
)
        # log the weight fingerprint just in case
logger.info(f"fingerprint: {tk.models.fingerprint(model)}")
return model
def load_weights(
model: tf.keras.models.Model,
path: tk.typing.PathLike,
by_name: bool = False,
skip_mismatch: bool = False,
skip_not_exist: bool = False,
strict: bool = True,
strict_fraction: float = 0.95,
) -> bool:
"""モデルの重みの読み込み。
Args:
model: モデル
path: ファイルパス
by_name: レイヤー名が一致する重みを読むモードにするならTrue。Falseなら並び順。
skip_mismatch: shapeが不一致の場合にskipするならTrue。(by_name=Trueの場合のみ有効)
skip_not_exist: ファイルが存在しない場合にエラーにしないならTrue。
strict: 読み込み前と重みがあまり変わらなかったらエラーにする。
strict_fraction: 重み不一致率の最低値。これ以下ならエラーにする。
Returns:
読み込んだか否か。skip_not_exist=Trueの場合に限りFalseが返る可能性がある。
"""
path = pathlib.Path(path)
if path.exists():
with tk.log.trace(f"load_weights({path})"):
if strict:
old_weights = model.get_weights()
if path.is_dir():
                # SavedModel does not seem to support load_weights?
                # TODO: support by_name, skip_mismatch?
loaded_model = tf.keras.models.load_model(str(path), compile=False)
model.set_weights(loaded_model.get_weights())
else:
model.load_weights(
str(path), by_name=by_name, skip_mismatch=skip_mismatch
)
if strict:
new_weights = model.get_weights()
changed_params = np.sum(
[
np.sum(np.not_equal(w1, w2))
for w1, w2 in zip(old_weights, new_weights)
]
)
num_params = np.sum([w.size for w in new_weights])
r = changed_params / num_params
msg = f"{changed_params:,} params chagnged. ({r:.1%})"
if r < strict_fraction:
raise RuntimeError(msg)
logger.info(msg)
            # log the weight fingerprint just in case
logger.info(f"fingerprint: {tk.models.fingerprint(model)}")
elif skip_not_exist:
logger.info(f"{path} is not found.")
return False
else:
raise RuntimeError(f"{path} is not found.")
return True
def save(
model: tf.keras.models.Model,
path: tk.typing.PathLike,
mode: str = "hdf5",
include_optimizer: bool = False,
):
"""モデルの保存。
Args:
model: モデル
path: 保存先。saved_modelの場合はディレクトリ
mode: "hdf5", "saved_model", "onnx", "tflite"のいずれか
include_optimizer: HDF5形式で保存する場合にoptimizerを含めるか否か
"""
assert mode in ("hdf5", "saved_model", "onnx", "tflite")
path = pathlib.Path(path)
if tk.hvd.is_master():
with tk.log.trace(f"save({path})"):
path.parent.mkdir(parents=True, exist_ok=True)
if mode in ("hdf5", "saved_model"):
model.save(
str(path),
overwrite=True,
include_optimizer=include_optimizer,
save_format={"hdf5": "h5", "saved_model": "tf"}[mode],
)
elif mode == "onnx":
os.environ["TF_KERAS"] = "1"
import keras2onnx
import onnxmltools
onnx_model = keras2onnx.convert_keras(model, model.name)
onnxmltools.utils.save_model(onnx_model, str(path))
elif mode == "tflite":
tflite_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
with path.open("wb") as f:
f.write(tflite_model)
else:
raise ValueError(f"Invalid save format: {mode}")
            # log the weight fingerprint just in case
logger.info(f"fingerprint: {tk.models.fingerprint(model)}")
tk.hvd.barrier()
def summary(model: tf.keras.models.Model):
"""summaryを実行するだけ。"""
model.summary(print_fn=logger.info if tk.hvd.is_master() else lambda x: None) # type: ignore
def plot(
model: tf.keras.models.Model,
to_file: tk.typing.PathLike = "model.png",
show_shapes: bool = True,
show_layer_names: bool = True,
rankdir: str = "TB",
):
"""モデルのグラフのplot。"""
path = pathlib.Path(to_file)
if tk.hvd.is_master():
with tk.log.trace(f"plot({path})"):
path.parent.mkdir(parents=True, exist_ok=True)
try:
# workaround: https://github.com/tensorflow/tensorflow/issues/38988
model = tf.keras.models.clone_model(model)
model._layers = [ # pylint: disable=protected-access
layer
for layer in model._layers # pylint: disable=protected-access
if isinstance(layer, tf.keras.layers.Layer)
]
tf.keras.utils.plot_model(
model,
str(path),
show_shapes=show_shapes,
show_layer_names=show_layer_names,
rankdir=rankdir,
)
except ValueError:
pass # "Cannot embed the 'svg' image format" (tf >= 1.14)
tk.hvd.barrier()
def compile(
model: tf.keras.models.Model,
optimizer: OptimizerType,
loss: LossType = None,
metrics: MetricsType = None,
experimental_run_tf_function: bool = None,
**kwargs,
): # pylint: disable=redefined-builtin
"""compileするだけ。"""
with tk.log.trace("compile"):
if tk.hvd.initialized():
optimizer = tf.keras.optimizers.get(optimizer)
c = tk.hvd.get().__dict__.get("Compression")
if c is None:
c = tk.hvd.get().__dict__.get("compression").Compression
optimizer = tk.hvd.get().DistributedOptimizer(optimizer, compression=c.fp16)
# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
# uses hvd.DistributedOptimizer() to compute gradients.
if experimental_run_tf_function is None:
experimental_run_tf_function = False
else:
if experimental_run_tf_function is None:
experimental_run_tf_function = True
model.compile(
optimizer=optimizer,
loss=loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function,
**kwargs,
)
def recompile(model: tf.keras.models.Model):
"""optimizerなどを再利用してコンパイル。"""
with tk.log.trace("recompile"):
# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
# uses hvd.DistributedOptimizer() to compute gradients.
model.compile(
optimizer=model.optimizer,
loss=model.loss,
metrics=model.metrics,
experimental_run_tf_function=False,
)
def modify_learning_rate(model: tf.keras.models.Model, factor: float):
"""学習率にfactorを掛け算する。"""
lr = tf.keras.backend.get_value(model.optimizer.learning_rate) * factor
set_learning_rate(model, lr)
def set_learning_rate(model: tf.keras.models.Model, learning_rate: float):
    """Set the learning rate."""
    tf.keras.backend.set_value(model.optimizer.learning_rate, learning_rate)
def fit(
model: tf.keras.models.Model,
train_iterator: tk.data.Iterator,
val_iterator: tk.data.Iterator = None,
val_freq: typing.Union[int, typing.Sequence[int], str, None] = "auto",
class_weight: typing.Dict[int, float] = None,
epochs: int = 1800,
callbacks: typing.List[tf.keras.callbacks.Callback] = None,
verbose: int = 1,
initial_epoch: int = 0,
):
"""学習。
Args:
model: モデル
train_iterator: 訓練データ
val_iterator: 検証データ。Noneなら省略。
val_freq: 検証を行うエポック数の間隔、またはエポック数のリスト。0ならvalidationしない(独自仕様)。"auto"なら適当に決める(独自仕様)。
class_weight: クラスごとの重みのdict
epochs: エポック数
callbacks: コールバック。EpochLoggerとErrorOnNaNとhorovod関連は自動追加。
verbose: 1ならプログレスバー表示、2ならepoch毎の結果だけ表示。
initial_epoch: 学習を開始するエポック数 - 1
"""
    # With Horovod each worker validates on its own, so the data must be shuffled.
    # When shuffling, one pass over the data does not cover everything, so run 3x as much.
    # Mimics the horovod examples:
# <https://github.com/horovod/horovod/blob/9bdd70d/examples/keras_mnist_advanced.py#L112,L115>
use_horovod = tk.hvd.is_active()
if val_freq == 0 or val_iterator is None:
        # val_freq == 0 disables validation (non-standard behavior)
val_freq = None
val_iterator = None
elif val_freq == "auto":
# "auto"なら適当に決める(独自仕様)
val_freq = make_val_freq(
val_freq,
epochs,
len(train_iterator.dataset),
len(val_iterator.dataset) * (3 if use_horovod else 1),
)
train_ds, train_steps = train_iterator.data_loader.get_ds(
train_iterator.dataset, shuffle=True
)
val_ds, val_steps = (
val_iterator.data_loader.get_ds(
val_iterator.dataset,
shuffle=use_horovod,
)
if val_iterator is not None
else (None, 0)
)
logger.info(f"fit(train): {train_ds.element_spec} {train_steps=}")
if val_ds is not None:
logger.info(f"fit(val): {val_ds.element_spec} {val_steps=}")
callbacks = make_callbacks(callbacks, training=True)
fit_kwargs = {}
if val_freq is not None:
fit_kwargs["validation_freq"] = val_freq
with tk.log.trace("fit"):
model.fit(
train_ds,
steps_per_epoch=train_steps // tk.hvd.size(),
validation_data=val_ds,
validation_steps=(
val_steps * 3 // tk.hvd.size() if use_horovod else val_steps
)
if val_iterator is not None
else None,
class_weight=class_weight,
epochs=epochs,
callbacks=callbacks,
verbose=verbose if tk.hvd.is_master() else 0,
initial_epoch=initial_epoch,
**fit_kwargs,
)
def make_val_freq(val_freq, epochs, train_size, val_size, max_val_per_train=0.1):
"""val_freqをほどよい感じに作成する。"""
# sqrt(epochs)回くらいやれば十分? (指標にも依るが…)
# valがtrainの10%未満くらいなら毎回やっても問題無い
val_freq = max(
int(np.sqrt(epochs)),
int(val_size / (train_size * max_val_per_train)),
1,
)
    # We want at least about 10 validations.
val_freq = min(val_freq, max(1, epochs // 10))
    # We want to validate on the last epoch, so count back from it every val_freq epochs.
val_list = list(range(epochs, 0, -val_freq))
    # Do not validate at too early an epoch.
if len(val_list) >= 2 and val_list[0] < val_freq:
val_list = val_list[1:]
return val_list
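# Worked example (added for clarity; the sizes are hypothetical): with epochs=1800,
# train_size=50000 and val_size=5000, val_freq = max(int(sqrt(1800)), int(5000 / 5000), 1) = 42,
# which is already below max(1, 1800 // 10) = 180, so validation runs every 42 epochs,
# counted back from the last one: [1800, 1758, 1716, ..., 36].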
def make_callbacks(
callbacks: typing.Optional[typing.List[tf.keras.callbacks.Callback]], training: bool
) -> typing.List[tf.keras.callbacks.Callback]:
"""callbacksをいい感じにする。"""
callbacks = (callbacks or []).copy()
if training:
callbacks.append(tk.callbacks.EpochLogger())
callbacks.append(tk.callbacks.ErrorOnNaN())
if tk.hvd.is_active():
callbacks.append(tk.hvd.get().callbacks.BroadcastGlobalVariablesCallback(0))
callbacks.append(tk.hvd.get().callbacks.MetricAverageCallback())
return callbacks
def predict(
model: tf.keras.models.Model,
iterator: tk.data.Iterator,
callbacks: typing.List[tf.keras.callbacks.Callback] = None,
verbose: int = 1,
on_batch_fn: OnBatchFnType = None,
) -> ModelIOType:
"""推論。
Args:
model: モデル
iterator: 推論したい入力データ
callbacks: コールバック
verbose: プログレスバーを表示するか否か
on_batch_fn: モデルとミニバッチ分の入力データを受け取り、推論結果を返す処理。(TTA用)
flow: 結果をgeneratorで返すならTrue
desc: flow時のtqdmのdesc
Returns:
推論結果。
"""
with tk.log.trace("predict"):
use_horovod = tk.hvd.is_active()
verbose = verbose if tk.hvd.is_master() else 0
callbacks = make_callbacks(callbacks, training=False)
dataset = tk.hvd.split(iterator.dataset) if use_horovod else iterator.dataset
ds, steps = iterator.data_loader.get_ds(dataset, without_label=True)
logger.info(f"predict: {ds.element_spec} {steps=}")
if on_batch_fn is not None:
gen = _predict_flow(
model=model,
ds=ds,
steps=steps,
callbacks=callbacks,
verbose=verbose,
on_batch_fn=on_batch_fn,
desc="predict",
)
results = list(gen)
if isinstance(results[0], (list, tuple)): # multiple output
values = [
np.array([r[i] for r in results]) for i in range(len(results[0]))
]
else:
values = np.array(results)
else:
values = model.predict(
ds,
steps=steps,
verbose=verbose,
callbacks=callbacks,
)
values = tk.hvd.allgather(values) if use_horovod else values
return values
def predict_flow(
model: tf.keras.models.Model,
ds: tf.data.Dataset,
steps: int,
callbacks: typing.List[tf.keras.callbacks.Callback] = None,
verbose: int = 1,
on_batch_fn: OnBatchFnType = None,
desc: str = "predict",
) -> typing.Iterator[ModelIOType]:
"""推論。
Args:
model: モデル
ds: 推論したい入力データ
steps: ステップ数
callbacks: コールバック
verbose: プログレスバー(tqdm)を表示するか否か
on_batch_fn: モデルとミニバッチ分の入力データを受け取り、推論結果を返す処理。(TTA用)
flow: 結果をgeneratorで返すならTrue
desc: flow時のtqdmのdesc
Returns:
推論結果。サンプルごとのgenerator。
"""
with tk.log.trace("predict"):
callbacks = make_callbacks(callbacks, training=False)
logger.info(f"predict_flow: {ds.element_spec} {steps=}")
return _predict_flow(
model=model,
ds=ds,
steps=steps,
callbacks=callbacks,
verbose=verbose,
on_batch_fn=on_batch_fn,
desc=desc,
)
def _predict_flow(
model: tf.keras.models.Model,
ds: tf.data.Dataset,
steps: int,
callbacks: typing.List[tf.keras.callbacks.Callback],
verbose: int,
on_batch_fn: OnBatchFnType = None,
desc: str = "predict",
):
on_batch_fn = on_batch_fn or _predict_on_batch
for cb in callbacks:
cb.on_predict_begin()
batch = 0
for X in tk.utils.tqdm(ds, desc=desc, total=steps, disable=verbose < 1):
for cb in callbacks:
cb.on_predict_batch_begin(batch)
pred_batch = on_batch_fn(model, X)
for cb in callbacks:
cb.on_predict_batch_end(batch)
if isinstance(pred_batch, (list, tuple)): # multiple output
assert len(pred_batch) >= 2
for b in zip(*pred_batch):
yield list(b)
else:
yield from pred_batch
batch += 1
for cb in callbacks:
cb.on_predict_end()
def _predict_on_batch(model: tf.keras.models.Model, X):
return model.predict_on_batch(X)
def evaluate(
model: tf.keras.models.Model,
iterator: tk.data.Iterator,
callbacks: typing.List[tf.keras.callbacks.Callback] = None,
verbose: int = 1,
) -> typing.Dict[str, float]:
"""評価。
Args:
model: モデル
iterator: データ
callbacks: コールバック
verbose: 1ならプログレスバー表示
Returns:
メトリクス名と値のdict
"""
with tk.log.trace("evaluate"):
use_horovod = tk.hvd.is_active()
verbose = verbose if tk.hvd.is_master() else 0
callbacks = make_callbacks(callbacks, training=False)
dataset = tk.hvd.split(iterator.dataset) if use_horovod else iterator.dataset
ds, steps = iterator.data_loader.get_ds(dataset)
logger.info(f"evaluate: {ds.element_spec} {steps=}")
values = model.evaluate(
ds,
steps=steps,
verbose=verbose,
callbacks=callbacks,
)
values = tk.hvd.allreduce(values) if use_horovod else values
if len(model.metrics_names) == 1:
evals = {model.metrics_names[0]: values}
else:
evals = dict(zip(model.metrics_names, values))
return evals
def freeze_layers(
model: typing.Union[tf.keras.models.Model, tf.keras.layers.Layer], layer_class: type
):
"""指定したレイヤーをfreezeする。"""
for layer in model.layers:
if isinstance(layer, layer_class):
typing.cast(tf.keras.layers.Layer, layer).trainable = False
if hasattr(layer, "layers") and len(layer.layers) > 0:
freeze_layers(layer, layer_class)
def predict_on_batch_augmented(
model: tf.keras.models.Model,
X_batch: np.ndarray,
flip: typing.Tuple[bool, bool] = (False, True),
crop_size: typing.Tuple[int, int] = (3, 3),
padding_size: typing.Tuple[int, int] = (32, 32),
padding_mode: str = "edge",
) -> typing.Union[np.ndarray, typing.List[np.ndarray]]:
"""ミニバッチ1個分の推論処理&TTA。
Args:
model: モデル。
X_batch: データ。
flip: 水平/垂直方向の反転を行うか否か。(v, h)
crop_size: 縦横のcropのパターンの数。(v, h)
padding_size: crop前にパディングするサイズ。(v, h)
padding_mode: パディングの種類。(np.padのmode)
Returns:
推論結果のリスト。
"""
shape = X_batch.shape
X_batch = np.pad(
X_batch,
(
(0, 0),
(padding_size[0], padding_size[0]),
(padding_size[1], padding_size[1]),
(0, 0),
),
mode=padding_mode,
)
X_batch2: typing.List[typing.Any] = []
for y in np.linspace(0, padding_size[0] * 2, crop_size[0], dtype=np.int32):
for x in np.linspace(0, padding_size[1] * 2, crop_size[1], dtype=np.int32):
X = X_batch[:, x : x + shape[1], y : y + shape[2], :]
X_batch2.append(X)
if flip[0]:
X_batch2.append(X[:, ::-1, :, :])
if flip[1]:
X_batch2.append(X[:, :, ::-1, :])
if flip[0] and flip[1]:
X_batch2.append(X[:, ::-1, ::-1, :])
result = model.predict(
np.concatenate(X_batch2, axis=0), batch_size=shape[0], verbose=0
)
if isinstance(result, (list, tuple)): # multiple output
result = [
r.reshape((len(X_batch2), len(X_batch)) + r.shape[1:]) for r in result
]
else:
result = result.reshape((len(X_batch2), len(X_batch)) + result.shape[1:])
return result
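# Worked example (added for clarity; the shapes are hypothetical): for X_batch of
# shape (8, 224, 224, 3) with the default flip=(False, True) and crop_size=(3, 3),
# each of the 3x3 crop positions contributes the crop plus its horizontal flip,
# i.e. 18 TTA variants, so a single-output model yields shape (18, 8, ...).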
def fingerprint(model: tf.keras.models.Model) -> str:
"""重みの同一性を確認するための文字列を作成して返す。"xx:xx:xx:xx"形式。"""
m = hashlib.sha256()
for w in model.get_weights():
m.update(w.tobytes())
h = m.hexdigest()
return f"{h[:2]}:{h[2:4]}:{h[4:6]}:{h[6:8]}"
def use_sam(model: tf.keras.models.Model, rho: float = 0.05):
"""Sharpness-Aware Minimization: <https://arxiv.org/abs/2010.01412>"""
model.train_step = functools.partial(sam_train_step, self=model, rho=rho)
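# Hedged usage sketch (not from the original source): use_sam() patches train_step on an
# already-compiled model, so a subsequent fit() runs the two-step SAM update below.
def _example_use_sam(model: tf.keras.models.Model, train_ds, epochs: int = 1):
    """Illustrative only; `train_ds` is assumed to be a tf.data.Dataset of (X, y)."""
    use_sam(model, rho=0.05)
    model.fit(train_ds, epochs=epochs)
    return model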
@tf.function
def sam_train_step(data, self: tf.keras.models.Model = None, rho: float = 0.05):
"""Sharpness-Aware Minimization: <https://arxiv.org/abs/2010.01412>"""
assert self is not None
if isinstance(data, tuple) and len(data) == 2:
X, y_true = data
else:
X, y_true = data, 0
# 1st step
with tf.GradientTape() as tape:
y_pred = self(X, training=True)
loss = self.compiled_loss(y_true, y_pred, regularization_losses=self.losses)
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
norm = tf.linalg.global_norm(gradients)
scale = rho / (norm + 1e-12)
e_w_list = []
for v, grad in zip(trainable_vars, gradients):
e_w = grad * scale
v.assign_add(e_w)
e_w_list.append(e_w)
# 2nd step
with tf.GradientTape() as tape:
y_pred_adv = self(X, training=True)
loss_adv = self.compiled_loss(
y_true, y_pred_adv, regularization_losses=self.losses
)
gradients_adv = tape.gradient(loss_adv, trainable_vars)
for v, e_w in zip(trainable_vars, e_w_list):
v.assign_sub(e_w)
# optimize
self.optimizer.apply_gradients(zip(gradients_adv, trainable_vars))
self.compiled_metrics.update_state(y_true, y_pred)
return {m.name: m.result() for m in self.metrics}
| mit |
Metaswitch/calico-neutron | neutron/tests/post_mortem_debug.py | 72 | 4237 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import traceback
def get_exception_handler(debugger_name):
debugger = _get_debugger(debugger_name)
return functools.partial(_exception_handler, debugger)
def _get_debugger(debugger_name):
try:
debugger = __import__(debugger_name)
except ImportError:
raise ValueError("can't import %s module as a post mortem debugger" %
debugger_name)
if 'post_mortem' in dir(debugger):
return debugger
else:
raise ValueError("%s is not a supported post mortem debugger" %
debugger_name)
def _exception_handler(debugger, exc_info):
"""Exception handler enabling post-mortem debugging.
A class extending testtools.TestCase can add this handler in setUp():
self.addOnException(post_mortem_debug.exception_handler)
When an exception occurs, the user will be dropped into a debugger
session in the execution environment of the failure.
Frames associated with the testing framework are excluded so that
the post-mortem session for an assertion failure will start at the
assertion call (e.g. self.assertTrue) rather than the framework code
that raises the failure exception (e.g. the assertTrue method).
"""
tb = exc_info[2]
ignored_traceback = get_ignored_traceback(tb)
if ignored_traceback:
tb = FilteredTraceback(tb, ignored_traceback)
traceback.print_exception(exc_info[0], exc_info[1], tb)
debugger.post_mortem(tb)
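# Hedged usage sketch (not from the original source): registering the handler on a
# testtools-style TestCase, as described in the docstring above. The debugger name
# 'pdb' is an illustrative assumption.
def _example_register_post_mortem(test_case, debugger_name='pdb'):
    """Illustrative only: call from setUp() of a testtools.TestCase subclass."""
    test_case.addOnException(get_exception_handler(debugger_name))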
def get_ignored_traceback(tb):
"""Retrieve the first traceback of an ignored trailing chain.
Given an initial traceback, find the first traceback of a trailing
    chain of tracebacks that should be ignored. The criterion for
    whether a traceback should be ignored is whether its frame's
    globals include the __unittest marker variable. This criterion is
culled from:
unittest.TestResult._is_relevant_tb_level
For example:
tb.tb_next => tb0.tb_next => tb1.tb_next
- If no tracebacks were to be ignored, None would be returned.
- If only tb1 was to be ignored, tb1 would be returned.
- If tb0 and tb1 were to be ignored, tb0 would be returned.
- If either of only tb or only tb0 was to be ignored, None would
      be returned because neither tb nor tb0 would be part of a
trailing chain of ignored tracebacks.
"""
# Turn the traceback chain into a list
tb_list = []
while tb:
tb_list.append(tb)
tb = tb.tb_next
# Find all members of an ignored trailing chain
ignored_tracebacks = []
for tb in reversed(tb_list):
if '__unittest' in tb.tb_frame.f_globals:
ignored_tracebacks.append(tb)
else:
break
# Return the first member of the ignored trailing chain
if ignored_tracebacks:
return ignored_tracebacks[-1]
class FilteredTraceback(object):
"""Wraps a traceback to filter unwanted frames."""
def __init__(self, tb, filtered_traceback):
"""Constructor.
:param tb: The start of the traceback chain to filter.
:param filtered_traceback: The first traceback of a trailing
chain that is to be filtered.
"""
self._tb = tb
self.tb_lasti = self._tb.tb_lasti
self.tb_lineno = self._tb.tb_lineno
self.tb_frame = self._tb.tb_frame
self._filtered_traceback = filtered_traceback
@property
def tb_next(self):
tb_next = self._tb.tb_next
if tb_next and tb_next != self._filtered_traceback:
return FilteredTraceback(tb_next, self._filtered_traceback)
| apache-2.0 |
skearnes/pylearn2 | pylearn2/costs/autoencoder.py | 5 | 6144 | """
.. todo::
WRITEME
"""
from theano import tensor
import theano.sparse
from pylearn2.costs.cost import Cost, DefaultDataSpecsMixin
from theano.tensor.shared_randomstreams import RandomStreams
class GSNFriendlyCost(DefaultDataSpecsMixin, Cost):
"""
.. todo::
WRITEME
"""
@staticmethod
def cost(target, output):
"""
.. todo::
WRITEME
"""
raise NotImplementedError
def expr(self, model, data, *args, **kwargs):
"""
.. todo::
WRITEME
"""
self.get_data_specs(model)[0].validate(data)
X = data
return self.cost(X, model.reconstruct(X))
class MeanSquaredReconstructionError(GSNFriendlyCost):
"""
.. todo::
WRITEME
"""
@staticmethod
def cost(a, b):
"""
.. todo::
WRITEME
"""
return ((a - b) ** 2).sum(axis=1).mean()
class MeanBinaryCrossEntropy(GSNFriendlyCost):
"""
.. todo::
WRITEME
"""
@staticmethod
def cost(target, output):
"""
.. todo::
WRITEME
"""
return tensor.nnet.binary_crossentropy(output, target).sum(axis=1).mean()
class SampledMeanBinaryCrossEntropy(DefaultDataSpecsMixin, Cost):
"""
.. todo::
WRITEME properly
CE cost that goes with sparse autoencoder with L1 regularization on activations
For theory:
Y. Dauphin, X. Glorot, Y. Bengio. ICML2011
Large-Scale Learning of Embeddings with Reconstruction Sampling
Parameters
----------
L1 : WRITEME
ratio : WRITEME
"""
def __init__(self, L1, ratio):
self.random_stream = RandomStreams(seed=1)
self.L1 = L1
self.one_ratio = ratio
def expr(self, model, data, ** kwargs):
"""
.. todo::
WRITEME
"""
self.get_data_specs(model)[0].validate(data)
X = data
# X is theano sparse
X_dense = theano.sparse.dense_from_sparse(X)
noise = self.random_stream.binomial(size=X_dense.shape, n=1,
prob=self.one_ratio, ndim=None)
# a random pattern that indicates to reconstruct all the 1s and some of the 0s in X
P = noise + X_dense
P = theano.tensor.switch(P>0, 1, 0)
P = tensor.cast(P, theano.config.floatX)
# L1 penalty on activations
reg_units = theano.tensor.abs_(model.encode(X)).sum(axis=1).mean()
# penalty on weights, optional
# params = model.get_params()
# W = params[2]
# there is a numerical problem when using
# tensor.log(1 - model.reconstruct(X, P))
# Pascal fixed it.
before_activation = model.reconstruct_without_dec_acti(X, P)
cost = ( 1 * X_dense *
tensor.log(tensor.log(1 + tensor.exp(-1 * before_activation))) +
(1 - X_dense) *
tensor.log(1 + tensor.log(1 + tensor.exp(before_activation)))
)
cost = (cost * P).sum(axis=1).mean()
cost = cost + self.L1 * reg_units
return cost
class SampledMeanSquaredReconstructionError(MeanSquaredReconstructionError):
"""
mse cost that goes with sparse autoencoder with L1 regularization on activations
For theory:
Y. Dauphin, X. Glorot, Y. Bengio. ICML2011
Large-Scale Learning of Embeddings with Reconstruction Sampling
Parameters
----------
L1 : WRITEME
ratio : WRITEME
"""
def __init__(self, L1, ratio):
self.random_stream = RandomStreams(seed=1)
self.L1 = L1
self.ratio = ratio
def expr(self, model, data, ** kwargs):
"""
.. todo::
WRITEME
"""
self.get_data_specs(model)[0].validate(data)
X = data
# X is theano sparse
X_dense=theano.sparse.dense_from_sparse(X)
noise = self.random_stream.binomial(size=X_dense.shape, n=1, prob=self.ratio, ndim=None)
# a random pattern that indicates to reconstruct all the 1s and some of the 0s in X
P = noise + X_dense
P = theano.tensor.switch(P>0, 1, 0)
P = tensor.cast(P, theano.config.floatX)
# L1 penalty on activations
L1_units = theano.tensor.abs_(model.encode(X)).sum(axis=1).mean()
# penalty on weights, optional
#params = model.get_params()
#W = params[2]
#L1_weights = theano.tensor.abs_(W).sum()
cost = ((model.reconstruct(X, P) - X_dense) ** 2)
cost = (cost * P).sum(axis=1).mean()
cost = cost + self.L1 * L1_units
return cost
#class MeanBinaryCrossEntropyTanh(Cost):
# def expr(self, model, data):
# self.get_data_specs(model)[0].validate(data)
# X = data
# X = (X + 1) / 2.
# return (
# tensor.xlogx.xlogx(model.reconstruct(X)) +
# tensor.xlogx.xlogx(1 - model.reconstruct(X))
# ).sum(axis=1).mean()
#
# def get_data_specs(self, model):
# return (model.get_input_space(), model.get_input_source())
class SparseActivation(DefaultDataSpecsMixin, Cost):
"""
Autoencoder sparse activation cost.
Regularize on KL divergence from desired average activation of each
hidden unit as described in Andrew Ng's CS294A Lecture Notes. See
http://www.stanford.edu/class/cs294a/sparseAutoencoder_2011new.pdf.
Parameters
----------
coeff : float
Coefficient for this regularization term in the objective
function.
p : float
Desired average activation of each hidden unit.
"""
def __init__(self, coeff, p):
self.coeff = coeff
self.p = p
def expr(self, model, data, **kwargs):
"""
Calculate regularization penalty.
"""
X = data
p = self.p
p_hat = tensor.abs_(model.encode(X)).mean(axis=0)
kl = p * tensor.log(p / p_hat) + (1 - p) * \
tensor.log((1 - p) / (1 - p_hat))
penalty = self.coeff * kl.sum()
penalty.name = 'sparse_activation_penalty'
return penalty
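# Hedged usage sketch (not from the original source): the sparsity penalty above is
# typically combined with a reconstruction cost, e.g. (assuming SumOfCosts from
# pylearn2.costs.cost behaves as documented):
#
#     from pylearn2.costs.cost import SumOfCosts
#     cost = SumOfCosts(costs=[MeanSquaredReconstructionError(),
#                              SparseActivation(coeff=0.001, p=0.05)])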
| bsd-3-clause |
CapOM/ChromiumGStreamerBackend | tools/screenshot_testing/update_golden_screenshots.py | 59 | 3403 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import getopt
import os
here = os.path.realpath(__file__)
src_path = (os.path.normpath(os.path.join(here, '..', '..', '..')))
sys.path.append(os.path.normpath(os.path.join(src_path, '..', 'depot_tools')))
USAGE = 'The utility uploads .png files to ' \
'chrome-os-oobe-ui-screenshot-testing Google Storage bucket.\n' \
'-i:\n\tdirectory with .png files which have to be uploaded\n' \
'-o (optional):\n\tdirectory to store generated .sha1 files. ' \
'Is set to chrome/browser/chromeos/login/screenshot_testing' \
'/golden_screenshots by default\n--help:\n\thelp'
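# Hedged example invocation (not from the original source); directory names are
# illustrative assumptions:
#   python update_golden_screenshots.py -i ./new_screenshots -o ./golden_screenshots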
import upload_to_google_storage
import download_from_google_storage
def upload(png_path):
# Creating a list of files which need to be uploaded to Google Storage:
# all .png files from the directory containing golden screenshots.
target = []
for file in os.listdir(png_path):
if file.endswith('.png'):
target.append(os.path.join(png_path, file))
# Creating a standard gsutil object, assuming there are depot_tools
# and everything related is set up already.
gsutil_path = os.path.abspath(os.path.join(src_path, '..', 'depot_tools',
'third_party', 'gsutil',
'gsutil'))
gsutil = download_from_google_storage.Gsutil(gsutil_path,
boto_path=None,
bypass_prodaccess=True)
# URL of the bucket used for storing screenshots.
bucket_url = 'gs://chrome-os-oobe-ui-screenshot-testing'
# Uploading using the most simple way,
# see depot_tools/upload_to_google_storage.py to have better understanding
# of this False and 1 arguments.
upload_to_google_storage.upload_to_google_storage(target, bucket_url, gsutil,
False, False, 1, False)
print 'All images are uploaded to Google Storage.'
def move_sha1(from_path, to_path):
from shutil import move
for file in os.listdir(from_path):
if (file.endswith('.sha1')):
old_place = os.path.join(from_path, file)
new_place = os.path.join(to_path, file)
if not os.path.exists(os.path.dirname(new_place)):
os.makedirs(os.path.dirname(new_place))
move(old_place, new_place)
def main(argv):
png_path = ''
sha1_path = os.path.join(src_path,
'chrome', 'browser', 'chromeos', 'login',
'screenshot_testing', 'golden_screenshots')
try:
opts, args = getopt.getopt(argv,'i:o:', ['--help'])
except getopt.GetoptError:
print USAGE
sys.exit(1)
for opt, arg in opts:
if opt == '--help':
print USAGE
sys.exit()
elif opt == '-i':
png_path = arg
elif opt =='-o':
sha1_path = arg
if png_path == '':
print USAGE
sys.exit(1)
png_path = os.path.abspath(png_path)
sha1_path = os.path.abspath(sha1_path)
upload(png_path)
move_sha1(png_path, sha1_path)
# TODO(elizavetai): Can this git stuff be done automatically?
print 'Please add new .sha1 files from ' \
+ str(sha1_path) + \
' to git manually.'
if __name__ == "__main__":
main(sys.argv[1:]) | bsd-3-clause |
google-research/google-research | smu/smu_sqlite.py | 1 | 7381 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface to a SQLite DB file for SMU data.
Provides a simpler interface than SQL to create and access the SMU data in an
SQLite database.
The majority of the data is stored as a blob, with just the bond topology id and
smiles string pulled out as fields.
"""
import os
from absl import logging
import sqlite3
from smu import dataset_pb2
_CONFORMER_TABLE_NAME = 'conformer'
_BTID_TABLE_NAME = 'btid'
_SMILES_TABLE_NAME = 'smiles'
class ReadOnlyError(Exception):
pass
class SMUSQLite:
"""Provides an interface for SMU data to a SQLite DB file.
The class hides away all the SQL fun with just Conformer protobuf visible in
the interface.
Internal details about the tables:
There are 3 separate tables
* conformer: Is the primary table which has columns
* cid: integer conformer id (unique)
* conformer: blob wire format proto of a conformer proto
* btid: Used for lookups by bond topology id which has columns
* btid: integer bond topology id (not unique)
* cid: integer conformer id (not unique)
* smiles: Used to map smiles to bond topology ids with columns
* smiles: text canonical smiles string (unique)
* btid: integer bond topology id
Note that if multiple smiles strings are associated with the same bond
  topology id, the first one provided will be silently kept.
"""
def __init__(self, filename, mode):
"""Creates SMUSQLite.
Args:
filename: database file, must be on local filesystem
mode: 'c' (create, deletes existing), 'w' (writable), 'r' (read only)
Raises:
FileNotFoundError: if 'r' and file does not exist
"""
if mode == 'c':
if os.path.exists(filename):
os.remove(filename)
self._read_only = False
self._conn = sqlite3.connect(filename)
self._maybe_init_db()
elif mode == 'w':
self._read_only = False
self._conn = sqlite3.connect(filename)
self._maybe_init_db()
elif mode == 'r':
if not os.path.exists(filename):
raise FileNotFoundError(filename)
self._conn = sqlite3.connect(filename)
self._read_only = True
else:
raise ValueError('Mode must be c, r, or w')
self._conn = sqlite3.connect(filename)
def _maybe_init_db(self):
"""Create the table and indices if they do not exist."""
make_table = (f'CREATE TABLE IF NOT EXISTS {_CONFORMER_TABLE_NAME} '
'(cid INTEGER PRIMARY KEY, conformer BLOB)')
self._conn.execute(make_table)
self._conn.execute(f'CREATE UNIQUE INDEX IF NOT EXISTS '
f'idx_cid ON {_CONFORMER_TABLE_NAME} (cid)')
self._conn.execute(f'CREATE TABLE IF NOT EXISTS {_BTID_TABLE_NAME} '
'(btid INTEGER, cid INTEGER)')
self._conn.execute(f'CREATE INDEX IF NOT EXISTS '
f'idx_btid ON {_BTID_TABLE_NAME} (btid)')
self._conn.execute(f'CREATE TABLE IF NOT EXISTS {_SMILES_TABLE_NAME} '
'(smiles TEXT, btid INTEGER)')
self._conn.execute(f'CREATE UNIQUE INDEX IF NOT EXISTS '
f'idx_smiles ON {_SMILES_TABLE_NAME} (smiles)')
self._conn.commit()
def bulk_insert(self, conformers, batch_size=10000):
"""Inserts conformers into the database.
Args:
conformers: iterable for dataset_pb2.Conformer
      batch_size: insert performance is greatly improved by putting multiple
        inserts into one transaction. 10k was a reasonable default from some
early exploration.
Raises:
ReadOnlyError: if mode is 'r'
"""
if self._read_only:
raise ReadOnlyError()
insert_conformer = f'INSERT INTO {_CONFORMER_TABLE_NAME} VALUES (?, ?)'
insert_btid = f'INSERT INTO {_BTID_TABLE_NAME} VALUES (?, ?)'
insert_smiles = (f'INSERT INTO {_SMILES_TABLE_NAME} VALUES (?, ?) '
f'ON CONFLICT(smiles) DO NOTHING')
cur = self._conn.cursor()
for idx, conformer in enumerate(conformers, 1):
cur.execute(insert_conformer,
(conformer.conformer_id, conformer.SerializeToString()))
for bond_topology in conformer.bond_topologies:
cur.execute(insert_btid, (bond_topology.bond_topology_id,
conformer.conformer_id))
cur.execute(insert_smiles,
(bond_topology.smiles,
bond_topology.bond_topology_id))
if batch_size and idx % batch_size == 0:
logging.info('bulk_insert: committing at index %d', idx)
self._conn.commit()
self._conn.commit()
def find_by_conformer_id(self, cid):
"""Finds the conformer associated with a conformer id.
Args:
cid: conformer id to look up.
Returns:
dataset_pb2.Conformer
Raises:
KeyError: if cid is not found
"""
cur = self._conn.cursor()
select = f'SELECT conformer FROM {_CONFORMER_TABLE_NAME} WHERE cid = ?'
cur.execute(select, (cid,))
result = cur.fetchall()
if not result:
raise KeyError(f'Conformer id {cid} not found')
# Since it's a unique index, there should only be one result and it's a
# tuple with one value.
assert len(result) == 1
assert len(result[0]) == 1
return dataset_pb2.Conformer().FromString(result[0][0])
def find_by_bond_topology_id(self, btid):
"""Finds all the conformer associated with a bond topology id.
Args:
btid: bond topology id to look up.
Returns:
iterable of dataset_pb2.Conformer
"""
cur = self._conn.cursor()
select = (f'SELECT cid, conformer '
f'FROM {_CONFORMER_TABLE_NAME} '
f'INNER JOIN {_BTID_TABLE_NAME} USING(cid) '
f'WHERE {_BTID_TABLE_NAME}.btid = ?')
cur.execute(select, (btid,))
return (dataset_pb2.Conformer().FromString(result[1]) for result in cur)
def find_by_smiles(self, smiles):
"""Finds all conformer associated with a given smiles string.
Args:
smiles: string
Returns:
iterable for dataset_pb2.Conformer
"""
# TODO(pfr): add canonicalization here
cur = self._conn.cursor()
select = f'SELECT btid FROM {_SMILES_TABLE_NAME} WHERE smiles = ?'
cur.execute(select, (smiles,))
result = cur.fetchall()
if not result:
return []
# Since it's a unique index, there should only be one result and it's a
# tuple with one value.
assert len(result) == 1
assert len(result[0]) == 1
return self.find_by_bond_topology_id(result[0][0])
def __iter__(self):
"""Iterates through all dataset_pb2.Conformer in the DB."""
select = f'SELECT conformer FROM {_CONFORMER_TABLE_NAME} ORDER BY rowid'
cur = self._conn.cursor()
cur.execute(select)
return (dataset_pb2.Conformer().FromString(result[0]) for result in cur)
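# Hedged usage sketch (not from the original source): creating a database and querying
# it back. The file name and SMILES string are illustrative assumptions.
def _example_smusqlite_usage(conformers, filename='smu_example.sqlite'):
  """Illustrative only; `conformers` is an iterable of dataset_pb2.Conformer."""
  db = SMUSQLite(filename, 'c')  # 'c' creates the file, deleting any existing one
  db.bulk_insert(conformers)
  read_db = SMUSQLite(filename, 'r')  # read-only handle
  return list(read_db.find_by_smiles('C'))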
| apache-2.0 |
GeotrekCE/Geotrek-admin | mapentity/urls.py | 3 | 1439 | from django.conf import settings
from django.urls import path, re_path, include
from .settings import app_settings
from .registry import registry
from .views import (map_screenshot, history_delete,
serve_attachment, JSSettings, Convert)
if app_settings['ACTION_HISTORY_ENABLED']:
from .models import LogEntry
_MEDIA_URL = settings.MEDIA_URL.replace(app_settings['ROOT_URL'], '')
if _MEDIA_URL.startswith('/'):
_MEDIA_URL = _MEDIA_URL[1:]
if _MEDIA_URL.endswith('/'):
_MEDIA_URL = _MEDIA_URL[:-1]
app_name = 'mapentity'
urlpatterns = [
path('map_screenshot/', map_screenshot, name='map_screenshot'),
path('convert/', Convert.as_view(), name='convert'),
path('history/delete/', history_delete, name='history_delete'),
path('api/auth/', include('rest_framework.urls')),
# See default value in app_settings.JS_SETTINGS.
# Will be overriden, most probably.
path('api/settings.json', JSSettings.as_view(), name='js_settings'),
]
if settings.DEBUG or app_settings['SENDFILE_HTTP_HEADER']:
urlpatterns += [
re_path(r'^%s/(?P<path>paperclip/.*)$' % _MEDIA_URL, serve_attachment),
]
if app_settings['ACTION_HISTORY_ENABLED']:
from mapentity.registry import MapEntityOptions
class LogEntryOptions(MapEntityOptions):
menu = False
dynamic_views = ['List', 'JsonList', 'Layer']
urlpatterns += registry.register(LogEntry, LogEntryOptions)
| bsd-2-clause |
chiviak/CouchPotatoServer | libs/subliminal/core.py | 53 | 12854 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from .exceptions import DownloadFailedError
from .services import ServiceConfig
from .tasks import DownloadTask, ListTask
from .utils import get_keywords
from .videos import Episode, Movie, scan
from .language import Language
from collections import defaultdict
from itertools import groupby
import bs4
import guessit
import logging
__all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE', 'MATCHING_CONFIDENCE',
'create_list_tasks', 'create_download_tasks', 'consume_task', 'matching_confidence',
'key_subtitles', 'group_by_video']
logger = logging.getLogger(__name__)
SERVICES = ['opensubtitles', 'bierdopje', 'subswiki', 'subtitulos', 'thesubdb', 'addic7ed', 'tvsubtitles', 'subscenter']
LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE = range(4)
def create_list_tasks(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter):
"""Create a list of :class:`~subliminal.tasks.ListTask` from one or more paths using the given criteria
:param paths: path(s) to video file or folder
:type paths: string or list
:param set languages: languages to search for
:param list services: services to use for the search
:param bool force: force searching for subtitles even if some are detected
:param bool multi: search multiple languages for the same video
:param string cache_dir: path to the cache directory to use
:param int max_depth: maximum depth for scanning entries
:param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
:return: the created tasks
:rtype: list of :class:`~subliminal.tasks.ListTask`
"""
scan_result = []
for p in paths:
scan_result.extend(scan(p, max_depth, scan_filter))
logger.debug(u'Found %d videos in %r with maximum depth %d' % (len(scan_result), paths, max_depth))
tasks = []
config = ServiceConfig(multi, cache_dir)
services = filter_services(services)
for video, detected_subtitles in scan_result:
detected_languages = set(s.language for s in detected_subtitles)
wanted_languages = languages.copy()
if not force and multi:
wanted_languages -= detected_languages
if not wanted_languages:
logger.debug(u'No need to list multi subtitles %r for %r because %r detected' % (languages, video, detected_languages))
continue
if not force and not multi and Language('Undetermined') in detected_languages:
logger.debug(u'No need to list single subtitles %r for %r because one detected' % (languages, video))
continue
logger.debug(u'Listing subtitles %r for %r with services %r' % (wanted_languages, video, services))
for service_name in services:
mod = __import__('services.' + service_name, globals=globals(), locals=locals(), fromlist=['Service'], level=-1)
service = mod.Service
if not service.check_validity(video, wanted_languages):
continue
task = ListTask(video, wanted_languages & service.languages, service_name, config)
logger.debug(u'Created task %r' % task)
tasks.append(task)
return tasks
def create_download_tasks(subtitles_by_video, languages, multi):
"""Create a list of :class:`~subliminal.tasks.DownloadTask` from a list results grouped by video
:param subtitles_by_video: :class:`~subliminal.tasks.ListTask` results with ordered subtitles
:type subtitles_by_video: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.Subtitle`]
:param languages: languages in preferred order
:type languages: :class:`~subliminal.language.language_list`
:param bool multi: download multiple languages for the same video
:return: the created tasks
:rtype: list of :class:`~subliminal.tasks.DownloadTask`
"""
tasks = []
for video, subtitles in subtitles_by_video.iteritems():
if not subtitles:
continue
if not multi:
task = DownloadTask(video, list(subtitles))
logger.debug(u'Created task %r' % task)
tasks.append(task)
continue
for _, by_language in groupby(subtitles, lambda s: languages.index(s.language)):
task = DownloadTask(video, list(by_language))
logger.debug(u'Created task %r' % task)
tasks.append(task)
return tasks
def consume_task(task, services=None):
"""Consume a task. If the ``services`` parameter is given, the function will attempt
to get the service from it. In case the service is not in ``services``, it will be initialized
and put in ``services``
:param task: task to consume
:type task: :class:`~subliminal.tasks.ListTask` or :class:`~subliminal.tasks.DownloadTask`
:param dict services: mapping between the service name and an instance of this service
:return: the result of the task
:rtype: list of :class:`~subliminal.subtitles.ResultSubtitle`
"""
if services is None:
services = {}
logger.info(u'Consuming %r' % task)
result = None
if isinstance(task, ListTask):
service = get_service(services, task.service, config=task.config)
result = service.list(task.video, task.languages)
elif isinstance(task, DownloadTask):
for subtitle in task.subtitles:
service = get_service(services, subtitle.service)
try:
service.download(subtitle)
result = [subtitle]
break
except DownloadFailedError:
logger.warning(u'Could not download subtitle %r, trying next' % subtitle)
continue
if result is None:
logger.error(u'No subtitles could be downloaded for video %r' % task.video)
return result
def matching_confidence(video, subtitle):
"""Compute the probability (confidence) that the subtitle matches the video
:param video: video to match
:type video: :class:`~subliminal.videos.Video`
:param subtitle: subtitle to match
:type subtitle: :class:`~subliminal.subtitles.Subtitle`
:return: the matching probability
:rtype: float
"""
guess = guessit.guess_file_info(subtitle.release, 'autodetect')
video_keywords = get_keywords(video.guess)
subtitle_keywords = get_keywords(guess) | subtitle.keywords
logger.debug(u'Video keywords %r - Subtitle keywords %r' % (video_keywords, subtitle_keywords))
replacement = {'keywords': len(video_keywords & subtitle_keywords)}
if isinstance(video, Episode):
replacement.update({'series': 0, 'season': 0, 'episode': 0})
matching_format = '{series:b}{season:b}{episode:b}{keywords:03b}'
best = matching_format.format(series=1, season=1, episode=1, keywords=len(video_keywords))
if guess['type'] in ['episode', 'episodesubtitle']:
if 'series' in guess and guess['series'].lower() == video.series.lower():
replacement['series'] = 1
if 'season' in guess and guess['season'] == video.season:
replacement['season'] = 1
if 'episodeNumber' in guess and guess['episodeNumber'] == video.episode:
replacement['episode'] = 1
elif isinstance(video, Movie):
replacement.update({'title': 0, 'year': 0})
matching_format = '{title:b}{year:b}{keywords:03b}'
best = matching_format.format(title=1, year=1, keywords=len(video_keywords))
if guess['type'] in ['movie', 'moviesubtitle']:
if 'title' in guess and guess['title'].lower() == video.title.lower():
replacement['title'] = 1
if 'year' in guess and guess['year'] == video.year:
replacement['year'] = 1
else:
logger.debug(u'Not able to compute confidence for %r' % video)
return 0.0
logger.debug(u'Found %r' % replacement)
confidence = float(int(matching_format.format(**replacement), 2)) / float(int(best, 2))
logger.info(u'Computed confidence %.4f for %r and %r' % (confidence, video, subtitle))
return confidence
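# Hedged worked example (not from the original source): for an Episode whose series,
# season and episode all match and where 3 of the video's 5 keywords are found in the
# subtitle, key = '1'+'1'+'1'+'011' = 0b111011 = 59 and best = '111'+'101' =
# 0b111101 = 61, giving a confidence of 59/61 ≈ 0.97.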
def get_service(services, service_name, config=None):
"""Get a service from its name in the service dict with the specified config.
If the service does not exist in the service dict, it is created and added to the dict.
:param dict services: dict where to get existing services or put created ones
:param string service_name: name of the service to get
:param config: config to use for the service
:type config: :class:`~subliminal.services.ServiceConfig` or None
:return: the corresponding service
:rtype: :class:`~subliminal.services.ServiceBase`
"""
if service_name not in services:
mod = __import__('services.' + service_name, globals=globals(), locals=locals(), fromlist=['Service'], level=-1)
services[service_name] = mod.Service()
services[service_name].init()
services[service_name].config = config
return services[service_name]
def key_subtitles(subtitle, video, languages, services, order):
"""Create a key to sort subtitle using the given order
:param subtitle: subtitle to sort
:type subtitle: :class:`~subliminal.subtitles.ResultSubtitle`
:param video: video to match
:type video: :class:`~subliminal.videos.Video`
:param list languages: languages in preferred order
:param list services: services in preferred order
:param order: preferred order for subtitles sorting
:type list: list of :data:`LANGUAGE_INDEX`, :data:`SERVICE_INDEX`, :data:`SERVICE_CONFIDENCE`, :data:`MATCHING_CONFIDENCE`
:return: a key ready to use for subtitles sorting
:rtype: int
"""
key = ''
for sort_item in order:
if sort_item == LANGUAGE_INDEX:
key += '{0:03d}'.format(len(languages) - languages.index(subtitle.language) - 1)
key += '{0:01d}'.format(subtitle.language == languages[languages.index(subtitle.language)])
elif sort_item == SERVICE_INDEX:
key += '{0:02d}'.format(len(services) - services.index(subtitle.service) - 1)
elif sort_item == SERVICE_CONFIDENCE:
key += '{0:04d}'.format(int(subtitle.confidence * 1000))
elif sort_item == MATCHING_CONFIDENCE:
confidence = 0
if subtitle.release:
confidence = matching_confidence(video, subtitle)
key += '{0:04d}'.format(int(confidence * 1000))
return int(key)
def group_by_video(list_results):
"""Group the results of :class:`ListTasks <subliminal.tasks.ListTask>` into a
dictionary of :class:`~subliminal.videos.Video` => :class:`~subliminal.subtitles.Subtitle`
:param list_results:
:type list_results: list of result of :class:`~subliminal.tasks.ListTask`
:return: subtitles grouped by videos
:rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.Subtitle`]
"""
result = defaultdict(list)
for video, subtitles in list_results:
result[video] += subtitles or []
return result
def filter_services(services):
"""Filter out services that are not available because of a missing feature
:param list services: service names to filter
:return: a copy of the initial list of service names without unavailable ones
:rtype: list
"""
filtered_services = services[:]
for service_name in services:
mod = __import__('services.' + service_name, globals=globals(), locals=locals(), fromlist=['Service'], level=-1)
service = mod.Service
if service.required_features is not None and bs4.builder_registry.lookup(*service.required_features) is None:
logger.warning(u'Service %s not available: none of available features could be used. One of %r required' % (service_name, service.required_features))
filtered_services.remove(service_name)
return filtered_services
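# Hedged usage sketch (not from the original source): the typical flow through this
# module, chaining the helpers above. `paths`, `languages` (a subliminal language
# set/list), `services` and `cache_dir` are illustrative assumptions.
#
#     tasks = create_list_tasks(paths, languages, services, force=False, multi=False,
#                               cache_dir=cache_dir, max_depth=3, scan_filter=None)
#     results = [(task.video, consume_task(task)) for task in tasks]
#     download_tasks = create_download_tasks(group_by_video(results), languages,
#                                            multi=False)
#     for task in download_tasks:
#         consume_task(task)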
| gpl-3.0 |
panosmdma/SlackOnly-SlackBuilds | development/python3-matplotlib/setupext.py | 2 | 58330 | from __future__ import print_function, absolute_import
from distutils import sysconfig
from distutils import version
from distutils.core import Extension
import glob
import io
import multiprocessing
import os
import re
import subprocess
import sys
import warnings
from textwrap import fill
import versioneer
PY3min = (sys.version_info[0] >= 3)
PY32min = (PY3min and sys.version_info[1] >= 2 or sys.version_info[0] > 3)
try:
from subprocess import check_output
except ImportError:
# check_output is not available in Python 2.6
def check_output(*popenargs, **kwargs):
"""
Run command with arguments and return its output as a byte
string.
Backported from Python 2.7 as it's implemented as pure python
        in the stdlib.
"""
process = subprocess.Popen(
stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
if sys.platform != 'win32':
if not PY3min:
from commands import getstatusoutput
else:
from subprocess import getstatusoutput
if PY3min:
import configparser
else:
import ConfigParser as configparser
# matplotlib build options, which can be altered using setup.cfg
options = {
'display_status': True,
'verbose': False,
'backend': None,
'basedirlist': None
}
setup_cfg = os.environ.get('MPLSETUPCFG', 'setup.cfg')
if os.path.exists(setup_cfg):
if PY32min:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
config.read(setup_cfg)
try:
options['display_status'] = not config.getboolean("status", "suppress")
except:
pass
try:
options['backend'] = config.get("rc_options", "backend")
except:
pass
try:
options['basedirlist'] = [
x.strip() for x in
config.get("directories", "basedirlist").split(',')]
except:
pass
else:
config = None
def get_win32_compiler():
"""
Determine the compiler being used on win32.
"""
# Used to determine mingw32 or msvc
    # This is pretty bad logic, does someone know a better way?
for v in sys.argv:
if 'mingw32' in v:
return 'mingw32'
return 'msvc'
win32_compiler = get_win32_compiler()
def extract_versions():
"""
Extracts version values from the main matplotlib __init__.py and
returns them as a dictionary.
"""
with open('lib/matplotlib/__init__.py') as fd:
for line in fd.readlines():
if (line.startswith('__version__numpy__')):
exec(line.strip())
return locals()
def has_include_file(include_dirs, filename):
"""
Returns `True` if `filename` can be found in one of the
directories in `include_dirs`.
"""
if sys.platform == 'win32':
include_dirs += os.environ.get('INCLUDE', '.').split(';')
for dir in include_dirs:
if os.path.exists(os.path.join(dir, filename)):
return True
return False
def check_include_file(include_dirs, filename, package):
"""
Raises an exception if the given include file can not be found.
"""
if not has_include_file(include_dirs, filename):
raise CheckFailed(
"The C/C++ header for %s (%s) could not be found. You "
"may need to install the development package." %
(package, filename))
def get_base_dirs():
"""
Returns a list of standard base directories on this platform.
"""
if options['basedirlist']:
return options['basedirlist']
basedir_map = {
'win32': ['win32_static', ],
'darwin': ['/usr/local/', '/usr', '/usr/X11',
'/opt/X11', '/opt/local'],
'sunos5': [os.getenv('MPLIB_BASE') or '/usr/local', ],
'gnu0': ['/usr'],
'aix5': ['/usr/local'],
}
return basedir_map.get(sys.platform, ['/usr/local', '/usr'])
def get_include_dirs():
"""
Returns a list of standard include directories on this platform.
"""
include_dirs = [os.path.join(d, 'include') for d in get_base_dirs()]
include_dirs.extend(
os.environ.get('CPLUS_INCLUDE_PATH', '').split(os.pathsep))
return include_dirs
def is_min_version(found, minversion):
"""
Returns `True` if `found` is at least as high a version as
`minversion`.
"""
expected_version = version.LooseVersion(minversion)
found_version = version.LooseVersion(found)
return found_version >= expected_version
# Define the display functions only if display_status is True.
if options['display_status']:
def print_line(char='='):
print(char * 76)
def print_status(package, status):
initial_indent = "%22s: " % package
indent = ' ' * 24
print(fill(str(status), width=76,
initial_indent=initial_indent,
subsequent_indent=indent))
def print_message(message):
indent = ' ' * 24 + "* "
print(fill(str(message), width=76,
initial_indent=indent,
subsequent_indent=indent))
def print_raw(section):
print(section)
else:
def print_line(*args, **kwargs):
pass
print_status = print_message = print_raw = print_line
# Remove the -Wstrict-prototypes option, as it's not valid for C++
customize_compiler = sysconfig.customize_compiler
def my_customize_compiler(compiler):
retval = customize_compiler(compiler)
try:
compiler.compiler_so.remove('-Wstrict-prototypes')
except (ValueError, AttributeError):
pass
return retval
sysconfig.customize_compiler = my_customize_compiler
def make_extension(name, files, *args, **kwargs):
"""
Make a new extension. Automatically sets include_dirs and
library_dirs to the base directories appropriate for this
platform.
`name` is the name of the extension.
`files` is a list of source files.
Any additional arguments are passed to the
`distutils.core.Extension` constructor.
"""
ext = DelayedExtension(name, files, *args, **kwargs)
for dir in get_base_dirs():
include_dir = os.path.join(dir, 'include')
if os.path.exists(include_dir):
ext.include_dirs.append(include_dir)
for lib in ('lib', 'lib64'):
lib_dir = os.path.join(dir, lib)
if os.path.exists(lib_dir):
ext.library_dirs.append(lib_dir)
ext.include_dirs.append('.')
return ext
class PkgConfig(object):
"""
This is a class for communicating with pkg-config.
"""
def __init__(self):
"""
Determines whether pkg-config exists on this machine.
"""
if sys.platform == 'win32':
self.has_pkgconfig = False
else:
try:
self.pkg_config = os.environ['PKG_CONFIG']
except KeyError:
self.pkg_config = 'pkg-config'
self.set_pkgconfig_path()
status, output = getstatusoutput(self.pkg_config + " --help")
self.has_pkgconfig = (status == 0)
if not self.has_pkgconfig:
print("IMPORTANT WARNING:")
print(
" pkg-config is not installed.\n"
" matplotlib may not be able to find some of its dependencies")
def set_pkgconfig_path(self):
pkgconfig_path = sysconfig.get_config_var('LIBDIR')
if pkgconfig_path is None:
return
pkgconfig_path = os.path.join(pkgconfig_path, 'pkgconfig')
if not os.path.isdir(pkgconfig_path):
return
try:
os.environ['PKG_CONFIG_PATH'] += ':' + pkgconfig_path
except KeyError:
os.environ['PKG_CONFIG_PATH'] = pkgconfig_path
def setup_extension(self, ext, package, default_include_dirs=[],
default_library_dirs=[], default_libraries=[],
alt_exec=None):
"""
Add parameters to the given `ext` for the given `package`.
"""
flag_map = {
'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
executable = alt_exec
if self.has_pkgconfig:
executable = (self.pkg_config + ' {0}').format(package)
use_defaults = True
if executable is not None:
command = "{0} --libs --cflags ".format(executable)
try:
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
pass
else:
output = output.decode(sys.getfilesystemencoding())
use_defaults = False
for token in output.split():
attr = flag_map.get(token[:2])
if attr is not None:
getattr(ext, attr).insert(0, token[2:])
if use_defaults:
basedirs = get_base_dirs()
for base in basedirs:
for include in default_include_dirs:
dir = os.path.join(base, include)
if os.path.exists(dir):
ext.include_dirs.append(dir)
for lib in default_library_dirs:
dir = os.path.join(base, lib)
if os.path.exists(dir):
ext.library_dirs.append(dir)
ext.libraries.extend(default_libraries)
return True
return False
def get_version(self, package):
"""
Get the version of the package from pkg-config.
"""
if not self.has_pkgconfig:
return None
status, output = getstatusoutput(
self.pkg_config + " %s --modversion" % (package))
if status == 0:
return output
return None
# The PkgConfig class should be used through this singleton
pkg_config = PkgConfig()
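# Hedged usage sketch (not from the original source): the pattern the SetupPackage
# classes below use to wire pkg-config information into an extension. The module and
# source names are illustrative assumptions.
def _example_pkg_config_usage():
    """Illustrative only; similar to the Png extension setup further down."""
    ext = make_extension('matplotlib._example', ['src/_example.cpp'])
    pkg_config.setup_extension(ext, 'libpng', default_libraries=['png', 'z'])
    return ext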
class CheckFailed(Exception):
"""
Exception thrown when a `SetupPackage.check` method fails.
"""
pass
class SetupPackage(object):
optional = False
def check(self):
"""
Checks whether the dependencies are met. Should raise a
`CheckFailed` exception if the dependency could not be met,
otherwise return a string indicating a version number or some
other message indicating what was found.
"""
pass
def get_packages(self):
"""
Get a list of package names to add to the configuration.
These are added to the `packages` list passed to
`distutils.setup`.
"""
return []
def get_namespace_packages(self):
"""
Get a list of namespace package names to add to the configuration.
These are added to the `namespace_packages` list passed to
`distutils.setup`.
"""
return []
def get_py_modules(self):
"""
Get a list of top-level modules to add to the configuration.
These are added to the `py_modules` list passed to
`distutils.setup`.
"""
return []
def get_package_data(self):
"""
Get a package data dictionary to add to the configuration.
These are merged into to the `package_data` list passed to
`distutils.setup`.
"""
return {}
def get_extension(self):
"""
Get a list of C extensions (`distutils.core.Extension`
objects) to add to the configuration. These are added to the
`extensions` list passed to `distutils.setup`.
"""
return None
def get_install_requires(self):
"""
Get a list of Python packages that we require.
pip/easy_install will attempt to download and install this
package if it is not installed.
"""
return []
def get_setup_requires(self):
"""
Get a list of Python packages that we require at build time.
pip/easy_install will attempt to download and install this
package if it is not installed.
"""
return []
def _check_for_pkg_config(self, package, include_file, min_version=None,
version=None):
"""
A convenience function for writing checks for a
pkg_config-defined dependency.
`package` is the pkg_config package name.
`include_file` is a top-level include file we expect to find.
`min_version` is the minimum version required.
`version` will override the found version if this package
requires an alternate method for that. Set version='unknown'
        if the version is not known but you still want to disable the
        pkg-config version check.
"""
if version is None:
version = pkg_config.get_version(package)
if version is None:
raise CheckFailed(
"pkg-config information for '%s' could not be found." %
package)
if min_version == 'PATCH':
raise CheckFailed(
"Requires patches that have not been merged upstream.")
if min_version and version != 'unknown':
if (not is_min_version(version, min_version)):
raise CheckFailed(
"Requires %s %s or later. Found %s." %
(package, min_version, version))
ext = self.get_extension()
if ext is None:
ext = make_extension('test', [])
pkg_config.setup_extension(ext, package)
check_include_file(
ext.include_dirs + get_include_dirs(), include_file, package)
return 'version %s' % version
class OptionalPackage(SetupPackage):
optional = True
force = False
config_category = "packages"
@classmethod
def get_config(cls):
"""
Look at `setup.cfg` and return one of ["auto", True, False] indicating
if the package is at default state ("auto"), forced by the user (case
insensitively defined as 1, true, yes, on for True) or opted-out (case
insensitively defined as 0, false, no, off for False).
"""
conf = "auto"
if config is not None and config.has_option(cls.config_category, cls.name):
try:
conf = config.getboolean(cls.config_category, cls.name)
except ValueError:
conf = config.get(cls.config_category, cls.name)
return conf
def check(self):
"""
Do not override this method!
For custom dependency checks override self.check_requirements().
Two things are checked: Configuration file and requirements.
"""
# Check configuration file
conf = self.get_config()
# Default "auto" state or install forced by user
if conf in [True, 'auto']:
message = "installing"
# Set non-optional if user sets `True` in config
if conf is True:
self.optional = False
# Configuration opt-out by user
else:
# Some backend extensions (e.g. Agg) need to be built for certain
# other GUI backends (e.g. TkAgg) even when manually disabled
if self.force is True:
message = "installing forced (config override)"
else:
raise CheckFailed("skipping due to configuration")
# Check requirements and add extra information (if any) to message.
# If requirements are not met a CheckFailed should be raised in there.
additional_info = self.check_requirements()
if additional_info:
message += ", " + additional_info
# No CheckFailed raised until now, return install message.
return message
def check_requirements(self):
"""
Override this method to do custom dependency checks.
- Raise CheckFailed() if requirements are not met.
- Return message with additional information, or an empty string
(or None) for no additional information.
"""
return ""
class OptionalBackendPackage(OptionalPackage):
config_category = "gui_support"
class Platform(SetupPackage):
name = "platform"
def check(self):
return sys.platform
class Python(SetupPackage):
name = "python"
def check(self):
major, minor1, minor2, s, tmp = sys.version_info
if major < 2:
raise CheckFailed(
"Requires Python 2.6 or later")
elif major == 2 and minor1 < 6:
raise CheckFailed(
"Requires Python 2.6 or later (in the 2.x series)")
elif major == 3 and minor1 < 1:
raise CheckFailed(
"Requires Python 3.1 or later (in the 3.x series)")
return sys.version
class Matplotlib(SetupPackage):
name = "matplotlib"
def check(self):
return versioneer.get_version()
def get_packages(self):
return [
'matplotlib',
'matplotlib.backends',
'matplotlib.backends.qt_editor',
'matplotlib.compat',
'matplotlib.projections',
'matplotlib.axes',
'matplotlib.sphinxext',
'matplotlib.style',
'matplotlib.testing',
'matplotlib.testing.jpl_units',
'matplotlib.tri',
]
def get_py_modules(self):
return ['pylab']
def get_package_data(self):
return {
'matplotlib':
[
'mpl-data/fonts/afm/*.afm',
'mpl-data/fonts/pdfcorefonts/*.afm',
'mpl-data/fonts/pdfcorefonts/*.txt',
'mpl-data/fonts/ttf/*.ttf',
'mpl-data/fonts/ttf/LICENSE_STIX',
'mpl-data/fonts/ttf/COPYRIGHT.TXT',
'mpl-data/fonts/ttf/README.TXT',
'mpl-data/fonts/ttf/RELEASENOTES.TXT',
'mpl-data/images/*.xpm',
'mpl-data/images/*.svg',
'mpl-data/images/*.gif',
'mpl-data/images/*.pdf',
'mpl-data/images/*.png',
'mpl-data/images/*.ppm',
'mpl-data/example/*.npy',
'mpl-data/matplotlibrc',
'backends/web_backend/*.*',
'backends/web_backend/jquery/js/*.min.js',
'backends/web_backend/jquery/css/themes/base/*.min.css',
'backends/web_backend/jquery/css/themes/base/images/*',
'backends/web_backend/css/*.*',
'backends/Matplotlib.nib/*',
'mpl-data/stylelib/*.mplstyle',
]}
class SampleData(OptionalPackage):
"""
This handles the sample data that ships with matplotlib. It is
technically optional, though most often will be desired.
"""
name = "sample_data"
def get_package_data(self):
return {
'matplotlib':
[
'mpl-data/sample_data/*.*',
'mpl-data/sample_data/axes_grid/*.*',
]}
class Toolkits(OptionalPackage):
name = "toolkits"
def get_packages(self):
return [
'mpl_toolkits',
'mpl_toolkits.mplot3d',
'mpl_toolkits.axes_grid',
'mpl_toolkits.axes_grid1',
'mpl_toolkits.axisartist',
]
def get_namespace_packages(self):
return ['mpl_toolkits']
class Tests(OptionalPackage):
name = "tests"
nose_min_version = '0.11.1'
def check(self):
super(Tests, self).check()
msgs = []
msg_template = ('{package} is required to run the matplotlib test '
'suite. Please install it with pip or your preferred'
' tool to run the test suite')
bad_nose = msg_template.format(
package='nose %s or later' % self.nose_min_version
)
try:
import nose
if is_min_version(nose.__version__, self.nose_min_version):
msgs += ['using nose version %s' % nose.__version__]
else:
msgs += [bad_nose]
except ImportError:
msgs += [bad_nose]
if sys.version_info >= (3, 3):
msgs += ['using unittest.mock']
else:
try:
import mock
msgs += ['using mock %s' % mock.__version__]
except ImportError:
msgs += [msg_template.format(package='mock')]
return ' / '.join(msgs)
def get_packages(self):
return [
'matplotlib.tests',
'matplotlib.sphinxext.tests',
]
def get_package_data(self):
baseline_images = [
'tests/baseline_images/%s/*' % x
for x in os.listdir('lib/matplotlib/tests/baseline_images')]
return {
'matplotlib':
baseline_images +
[
'tests/mpltest.ttf',
'tests/test_rcparams.rc',
'tests/test_utf32_be_rcparams.rc',
'sphinxext/tests/tinypages/*.rst',
'sphinxext/tests/tinypages/*.py',
'sphinxext/tests/tinypages/_static/*',
]}
class Toolkits_Tests(Tests):
name = "toolkits_tests"
def check_requirements(self):
conf = self.get_config()
toolkits_conf = Toolkits.get_config()
tests_conf = Tests.get_config()
if conf is True:
Tests.force = True
Toolkits.force = True
elif conf == "auto" and not (toolkits_conf and tests_conf):
# Only auto-install if both toolkits and tests are set
# to be installed
raise CheckFailed("toolkits_tests needs 'toolkits' and 'tests'")
return ""
def get_packages(self):
return [
'mpl_toolkits.tests',
]
def get_package_data(self):
baseline_images = [
'tests/baseline_images/%s/*' % x
for x in os.listdir('lib/mpl_toolkits/tests/baseline_images')]
return {'mpl_toolkits': baseline_images}
def get_namespace_packages(self):
return ['mpl_toolkits']
class DelayedExtension(Extension, object):
"""
A distutils Extension subclass where some of its members
may have delayed computation until reaching the build phase.
This is so we can, for example, get the Numpy include dirs
after pip has installed Numpy for us if it wasn't already
on the system.
"""
def __init__(self, *args, **kwargs):
super(DelayedExtension, self).__init__(*args, **kwargs)
self._finalized = False
self._hooks = {}
def add_hook(self, member, func):
"""
Add a hook to dynamically compute a member.
Parameters
----------
member : string
The name of the member
func : callable
The function to call to get dynamically-computed values
for the member.
"""
self._hooks[member] = func
def finalize(self):
self._finalized = True
class DelayedMember(property):
def __init__(self, name):
self._name = name
def __get__(self, obj, objtype=None):
result = getattr(obj, '_' + self._name, [])
if obj._finalized:
if self._name in obj._hooks:
result = obj._hooks[self._name]() + result
return result
def __set__(self, obj, value):
setattr(obj, '_' + self._name, value)
include_dirs = DelayedMember('include_dirs')
class Numpy(SetupPackage):
name = "numpy"
@staticmethod
def include_dirs_hook():
if sys.version_info[0] >= 3:
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import imp
import numpy
imp.reload(numpy)
else:
import __builtin__
if hasattr(__builtin__, '__NUMPY_SETUP__'):
del __builtin__.__NUMPY_SETUP__
import numpy
reload(numpy)
ext = Extension('test', [])
ext.include_dirs.append(numpy.get_include())
if not has_include_file(
ext.include_dirs, os.path.join("numpy", "arrayobject.h")):
warnings.warn(
"The C headers for numpy could not be found. "
"You may need to install the development package")
return [numpy.get_include()]
def check(self):
min_version = extract_versions()['__version__numpy__']
try:
import numpy
except ImportError:
raise CheckFailed(
"could not be found" )
if not is_min_version(numpy.__version__, min_version):
raise CheckFailed(
"requires numpy %s or later to build. (Found %s)" %
(min_version, numpy.__version__))
return 'version %s' % numpy.__version__
def add_flags(self, ext):
# Ensure that PY_ARRAY_UNIQUE_SYMBOL is uniquely defined for
# each extension
array_api_name = 'MPL_' + ext.name.replace('.', '_') + '_ARRAY_API'
ext.define_macros.append(('PY_ARRAY_UNIQUE_SYMBOL', array_api_name))
ext.add_hook('include_dirs', self.include_dirs_hook)
ext.define_macros.append(('NPY_NO_DEPRECATED_API',
'NPY_1_7_API_VERSION'))
def get_setup_requires(self):
return ['numpy>=1.6']
def get_install_requires(self):
return ['numpy>=1.6']
class LibAgg(SetupPackage):
name = 'libagg'
def check(self):
self.__class__.found_external = True
try:
return self._check_for_pkg_config(
'libagg', 'agg2/agg_basics.h', min_version='PATCH')
except CheckFailed as e:
self.__class__.found_external = False
return str(e) + ' Using local copy.'
def add_flags(self, ext, add_sources=True):
if self.found_external:
pkg_config.setup_extension(ext, 'libagg')
else:
ext.include_dirs.append('extern/agg24-svn/include')
if add_sources:
agg_sources = [
'agg_bezier_arc.cpp',
'agg_curves.cpp',
'agg_image_filters.cpp',
'agg_trans_affine.cpp',
'agg_vcgen_contour.cpp',
'agg_vcgen_dash.cpp',
'agg_vcgen_stroke.cpp',
'agg_vpgen_segmentator.cpp'
]
ext.sources.extend(
os.path.join('extern', 'agg24-svn', 'src', x) for x in agg_sources)
class FreeType(SetupPackage):
name = "freetype"
def check(self):
if sys.platform == 'win32':
check_include_file(get_include_dirs(), 'ft2build.h', 'freetype')
return 'Using unknown version found on system.'
status, output = getstatusoutput("freetype-config --ftversion")
if status == 0:
version = output
else:
version = None
# Early versions of freetype grep badly inside freetype-config,
# so catch those cases. (tested with 2.5.3).
if version is None or 'No such file or directory\ngrep:' in version:
version = self.version_from_header()
# pkg_config returns the libtool version rather than the
# freetype version so we need to explicitly pass the version
# to _check_for_pkg_config
return self._check_for_pkg_config(
'freetype2', 'ft2build.h',
min_version='2.3', version=version)
def version_from_header(self):
version = 'unknown'
ext = self.get_extension()
if ext is None:
return version
# Return the first version found in the include dirs.
for include_dir in ext.include_dirs:
header_fname = os.path.join(include_dir, 'freetype.h')
if os.path.exists(header_fname):
major, minor, patch = 0, 0, 0
with open(header_fname, 'r') as fh:
for line in fh:
if line.startswith('#define FREETYPE_'):
value = line.rsplit(' ', 1)[1].strip()
if 'MAJOR' in line:
major = value
elif 'MINOR' in line:
minor = value
else:
patch = value
return '.'.join([major, minor, patch])
def add_flags(self, ext):
pkg_config.setup_extension(
ext, 'freetype2',
default_include_dirs=[
'include/freetype2', 'freetype2',
'lib/freetype2/include',
'lib/freetype2/include/freetype2'],
default_library_dirs=[
'freetype2/lib'],
default_libraries=['freetype', 'z'])
class FT2Font(SetupPackage):
name = 'ft2font'
def get_extension(self):
sources = [
'src/ft2font.cpp',
'src/ft2font_wrapper.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib.ft2font', sources)
FreeType().add_flags(ext)
Numpy().add_flags(ext)
return ext
class Png(SetupPackage):
name = "png"
def check(self):
if sys.platform == 'win32':
check_include_file(get_include_dirs(), 'png.h', 'png')
return 'Using unknown version found on system.'
status, output = getstatusoutput("libpng-config --version")
if status == 0:
version = output
else:
version = None
try:
return self._check_for_pkg_config(
'libpng', 'png.h',
min_version='1.2', version=version)
except CheckFailed as e:
if has_include_file(get_include_dirs(), 'png.h'):
return str(e) + ' Using unknown version found on system.'
raise
def get_extension(self):
sources = [
'src/_png.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib._png', sources)
pkg_config.setup_extension(
ext, 'libpng', default_libraries=['png', 'z'],
alt_exec='libpng-config --ldflags')
Numpy().add_flags(ext)
return ext
class Qhull(SetupPackage):
name = "qhull"
def check(self):
self.__class__.found_external = True
try:
return self._check_for_pkg_config(
'qhull', 'qhull/qhull_a.h', min_version='2003.1')
except CheckFailed as e:
self.__class__.found_pkgconfig = False
# Qhull may not be in the pkg-config system but may still be
# present on this system, so check if the header files can be
# found.
include_dirs = [
os.path.join(x, 'qhull') for x in get_include_dirs()]
if has_include_file(include_dirs, 'qhull_a.h'):
return 'Using system Qhull (version unknown, no pkg-config info)'
else:
self.__class__.found_external = False
return str(e) + ' Using local copy.'
def add_flags(self, ext):
if self.found_external:
pkg_config.setup_extension(ext, 'qhull',
default_libraries=['qhull'])
else:
ext.include_dirs.append('extern')
ext.sources.extend(glob.glob('extern/qhull/*.c'))
class TTConv(SetupPackage):
name = "ttconv"
def get_extension(self):
sources = [
'src/_ttconv.cpp',
'extern/ttconv/pprdrv_tt.cpp',
'extern/ttconv/pprdrv_tt2.cpp',
'extern/ttconv/ttutil.cpp'
]
ext = make_extension('matplotlib.ttconv', sources)
Numpy().add_flags(ext)
ext.include_dirs.append('extern')
return ext
class Path(SetupPackage):
name = "path"
def get_extension(self):
sources = [
'src/py_converters.cpp',
'src/_path_wrapper.cpp'
]
ext = make_extension('matplotlib._path', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
return ext
class Image(SetupPackage):
name = "image"
def get_extension(self):
sources = [
'src/_image.cpp',
'src/mplutils.cpp',
'src/_image_wrapper.cpp'
]
ext = make_extension('matplotlib._image', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
return ext
class ContourLegacy(SetupPackage):
name = "contour_legacy"
def get_extension(self):
sources = [
"src/cntr.c"
]
ext = make_extension('matplotlib._cntr', sources)
Numpy().add_flags(ext)
return ext
class Contour(SetupPackage):
name = "contour"
def get_extension(self):
sources = [
"src/_contour.cpp",
"src/_contour_wrapper.cpp",
]
ext = make_extension('matplotlib._contour', sources)
Numpy().add_flags(ext)
return ext
class Delaunay(SetupPackage):
name = "delaunay"
def get_packages(self):
return ['matplotlib.delaunay']
def get_extension(self):
sources = ["_delaunay.cpp", "VoronoiDiagramGenerator.cpp",
"delaunay_utils.cpp", "natneighbors.cpp"]
sources = [os.path.join('lib/matplotlib/delaunay', s) for s in sources]
ext = make_extension('matplotlib._delaunay', sources)
Numpy().add_flags(ext)
return ext
class QhullWrap(SetupPackage):
name = "qhull_wrap"
def get_extension(self):
sources = ['src/qhull_wrap.c']
ext = make_extension('matplotlib._qhull', sources,
define_macros=[('MPL_DEVNULL', os.devnull)])
Numpy().add_flags(ext)
Qhull().add_flags(ext)
return ext
class Tri(SetupPackage):
name = "tri"
def get_extension(self):
sources = [
"lib/matplotlib/tri/_tri.cpp",
"lib/matplotlib/tri/_tri_wrapper.cpp",
"src/mplutils.cpp"
]
ext = make_extension('matplotlib._tri', sources)
Numpy().add_flags(ext)
return ext
class Externals(SetupPackage):
name = "externals"
def get_packages(self):
return ['matplotlib.externals']
class Pytz(SetupPackage):
name = "pytz"
def check(self):
try:
import pytz
except ImportError:
raise CheckFailed(
"could not be found")
return "using pytz version %s" % pytz.__version__
def get_install_requires(self):
return ['pytz']
class Cycler(SetupPackage):
name = "cycler"
def check(self):
try:
import cycler
except ImportError:
raise CheckFailed(
"could not be found")
return "using cycler version %s" % cycler.__version__
def get_install_requires(self):
return ['cycler']
class Dateutil(SetupPackage):
name = "dateutil"
def __init__(self, version=None):
self.version = version
def check(self):
try:
import dateutil
except ImportError:
# dateutil 2.1 has a file encoding bug that breaks installation on
# python 3.3
# https://github.com/matplotlib/matplotlib/issues/2373
# hack around the problem by installing the (working) v2.0
#major, minor1, _, _, _ = sys.version_info
#if self.version is None and (major, minor1) == (3, 3):
#self.version = '!=2.1'
raise CheckFailed (
"could not be found")
major, minor1, _, _, _ = sys.version_info
if dateutil.__version__ == '2.1' and (major, minor1) == (3, 3):
raise CheckFailed (
"dateutil v. 2.1 has a bug that breaks installation"
"on python 3.3.x, use another dateutil version")
return "using dateutil version %s" % dateutil.__version__
def get_install_requires(self):
dateutil = 'python-dateutil'
if self.version is not None:
dateutil += self.version
return [dateutil]
class Tornado(SetupPackage):
name = "tornado"
def check(self):
try:
import tornado
except ImportError:
raise CheckFailed (
"could not be found")
return "using tornado version %s" % tornado.version
class Pyparsing(SetupPackage):
name = "pyparsing"
# pyparsing 2.0.4 has broken python 3 support.
# pyparsing 2.1.2 is broken in python3.4/3.3.
def is_ok(self):
# pyparsing 2.0.0 bug, but it may be patched in distributions
try:
import pyparsing
f = pyparsing.Forward()
f <<= pyparsing.Literal('a')
return f is not None
except (ImportError, TypeError):
return False
def check(self):
try:
import pyparsing
except ImportError:
raise CheckFailed(
"could not be found")
required = [1, 5, 6]
if [int(x) for x in pyparsing.__version__.split('.')] < required:
raise CheckFailed(
"matplotlib requires pyparsing >= {0}".format(
'.'.join(str(x) for x in required)))
if not self.is_ok():
return (
"Your pyparsing contains a bug that will be monkey-patched by "
"matplotlib. For best results, upgrade to pyparsing 2.0.1 or "
"later.")
return "using pyparsing version %s" % pyparsing.__version__
def get_install_requires(self):
versionstring = 'pyparsing>=1.5.6,!=2.0.4,!=2.1.2'
if self.is_ok():
return [versionstring]
else:
return [versionstring + ',!=2.0.0']
class BackendAgg(OptionalBackendPackage):
name = "agg"
force = True
def get_extension(self):
sources = [
"src/mplutils.cpp",
"src/py_converters.cpp",
"src/_backend_agg.cpp",
"src/_backend_agg_wrapper.cpp"
]
ext = make_extension('matplotlib.backends._backend_agg', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
FreeType().add_flags(ext)
return ext
class BackendTkAgg(OptionalBackendPackage):
name = "tkagg"
force = True
def check(self):
return "installing; run-time loading from Python Tcl / Tk"
def get_extension(self):
sources = [
'src/py_converters.cpp',
'src/_tkagg.cpp'
]
ext = make_extension('matplotlib.backends._tkagg', sources)
self.add_flags(ext)
Numpy().add_flags(ext)
LibAgg().add_flags(ext, add_sources=False)
return ext
def add_flags(self, ext):
ext.include_dirs.extend(['src'])
if sys.platform == 'win32':
# PSAPI library needed for finding Tcl / Tk at run time
ext.libraries.extend(['psapi'])
class BackendGtk(OptionalBackendPackage):
name = "gtk"
def check_requirements(self):
try:
import gtk
except ImportError:
raise CheckFailed("Requires pygtk")
except RuntimeError:
raise CheckFailed('pygtk present, but import failed.')
else:
version = (2, 2, 0)
if gtk.pygtk_version < version:
raise CheckFailed(
"Requires pygtk %d.%d.%d or later. "
"Found %d.%d.%d" % (version + gtk.pygtk_version))
ext = self.get_extension()
self.add_flags(ext)
check_include_file(ext.include_dirs,
os.path.join("gtk", "gtk.h"),
'gtk')
check_include_file(ext.include_dirs,
os.path.join("pygtk", "pygtk.h"),
'pygtk')
return 'Gtk: %s pygtk: %s' % (
".".join(str(x) for x in gtk.gtk_version),
".".join(str(x) for x in gtk.pygtk_version))
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def get_extension(self):
sources = [
'src/_backend_gdk.c'
]
ext = make_extension('matplotlib.backends._backend_gdk', sources)
self.add_flags(ext)
Numpy().add_flags(ext)
return ext
def add_flags(self, ext):
if sys.platform == 'win32':
def getoutput(s):
ret = os.popen(s).read().strip()
return ret
if 'PKG_CONFIG_PATH' not in os.environ:
# If Gtk+ is installed, pkg-config is required to be installed
os.environ['PKG_CONFIG_PATH'] = 'C:\\GTK\\lib\\pkgconfig'
# popen broken on my win32 plaform so I can't use pkgconfig
ext.library_dirs.extend(
['C:/GTK/bin', 'C:/GTK/lib'])
ext.include_dirs.extend(
['win32_static/include/pygtk-2.0',
'C:/GTK/include',
'C:/GTK/include/gobject',
'C:/GTK/include/gext',
'C:/GTK/include/glib',
'C:/GTK/include/pango',
'C:/GTK/include/atk',
'C:/GTK/include/X11',
'C:/GTK/include/cairo',
'C:/GTK/include/gdk',
'C:/GTK/include/gdk-pixbuf',
'C:/GTK/include/gtk',
])
pygtkIncludes = getoutput(
'pkg-config --cflags-only-I pygtk-2.0').split()
gtkIncludes = getoutput(
'pkg-config --cflags-only-I gtk+-2.0').split()
includes = pygtkIncludes + gtkIncludes
ext.include_dirs.extend([include[2:] for include in includes])
pygtkLinker = getoutput('pkg-config --libs pygtk-2.0').split()
gtkLinker = getoutput('pkg-config --libs gtk+-2.0').split()
linkerFlags = pygtkLinker + gtkLinker
ext.libraries.extend(
[flag[2:] for flag in linkerFlags if flag.startswith('-l')])
ext.library_dirs.extend(
[flag[2:] for flag in linkerFlags if flag.startswith('-L')])
ext.extra_link_args.extend(
[flag for flag in linkerFlags if not
(flag.startswith('-l') or flag.startswith('-L'))])
# visual studio doesn't need the math library
if (sys.platform == 'win32' and
win32_compiler == 'msvc' and
'm' in ext.libraries):
ext.libraries.remove('m')
elif sys.platform != 'win32':
pkg_config.setup_extension(ext, 'pygtk-2.0')
pkg_config.setup_extension(ext, 'gtk+-2.0')
class BackendGtkAgg(BackendGtk):
name = "gtkagg"
def check(self):
try:
return super(BackendGtkAgg, self).check()
except:
raise
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def get_extension(self):
sources = [
'src/py_converters.cpp',
'src/_gtkagg.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib.backends._gtkagg', sources)
self.add_flags(ext)
LibAgg().add_flags(ext)
Numpy().add_flags(ext)
return ext
def backend_gtk3agg_internal_check(x):
try:
import gi
except ImportError:
return (False, "Requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except ValueError:
return (False, "Requires gtk3 development files to be installed.")
except AttributeError:
return (False, "pygobject version too old.")
try:
from gi.repository import Gtk, Gdk, GObject
except (ImportError, RuntimeError):
return (False, "Requires pygobject to be installed.")
return (True, "version %s.%s.%s" % (
Gtk.get_major_version(),
        Gtk.get_minor_version(),
        Gtk.get_micro_version()))
class BackendGtk3Agg(OptionalBackendPackage):
name = "gtk3agg"
def check_requirements(self):
if 'TRAVIS' in os.environ:
raise CheckFailed("Can't build with Travis")
# This check needs to be performed out-of-process, because
# importing gi and then importing regular old pygtk afterward
# segfaults the interpreter.
try:
p = multiprocessing.Pool()
except:
return "unknown (can not use multiprocessing to determine)"
try:
res = p.map_async(backend_gtk3agg_internal_check, [0])
success, msg = res.get(timeout=10)[0]
except multiprocessing.TimeoutError:
p.terminate()
            # No result returned. Probably hanging; terminate the process.
success = False
raise CheckFailed("Check timed out")
except:
p.close()
# Some other error.
success = False
msg = "Could not determine"
raise
else:
p.close()
finally:
p.join()
if success:
return msg
else:
raise CheckFailed(msg)
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def backend_gtk3cairo_internal_check(x):
try:
import cairocffi
except ImportError:
try:
import cairo
except ImportError:
return (False, "Requires cairocffi or pycairo to be installed.")
try:
import gi
except ImportError:
return (False, "Requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except ValueError:
return (False, "Requires gtk3 development files to be installed.")
except AttributeError:
return (False, "pygobject version too old.")
try:
from gi.repository import Gtk, Gdk, GObject
except (RuntimeError, ImportError):
return (False, "Requires pygobject to be installed.")
return (True, "version %s.%s.%s" % (
Gtk.get_major_version(),
        Gtk.get_minor_version(),
        Gtk.get_micro_version()))
class BackendGtk3Cairo(OptionalBackendPackage):
name = "gtk3cairo"
def check_requirements(self):
if 'TRAVIS' in os.environ:
raise CheckFailed("Can't build with Travis")
# This check needs to be performed out-of-process, because
# importing gi and then importing regular old pygtk afterward
# segfaults the interpreter.
try:
p = multiprocessing.Pool()
except:
return "unknown (can not use multiprocessing to determine)"
try:
res = p.map_async(backend_gtk3cairo_internal_check, [0])
success, msg = res.get(timeout=10)[0]
except multiprocessing.TimeoutError:
p.terminate()
            # No result returned. Probably hanging; terminate the process.
success = False
raise CheckFailed("Check timed out")
except:
p.close()
success = False
raise
else:
p.close()
finally:
p.join()
if success:
return msg
else:
raise CheckFailed(msg)
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
class BackendWxAgg(OptionalBackendPackage):
name = "wxagg"
def check_requirements(self):
wxversioninstalled = True
try:
import wxversion
except ImportError:
wxversioninstalled = False
if wxversioninstalled:
try:
_wx_ensure_failed = wxversion.AlreadyImportedError
except AttributeError:
_wx_ensure_failed = wxversion.VersionError
try:
wxversion.ensureMinimal('2.8')
except _wx_ensure_failed:
pass
try:
import wx
backend_version = wx.VERSION_STRING
except ImportError:
raise CheckFailed("requires wxPython")
# Extra version check in case wxversion lacks AlreadyImportedError;
# then VersionError might have been raised and ignored when
# there really *is* a problem with the version.
major, minor = [int(n) for n in backend_version.split('.')[:2]]
if major < 2 or (major < 3 and minor < 8):
raise CheckFailed(
"Requires wxPython 2.8, found %s" % backend_version)
return "version %s" % backend_version
class BackendMacOSX(OptionalBackendPackage):
name = 'macosx'
def check_requirements(self):
if sys.platform != 'darwin':
raise CheckFailed("Mac OS-X only")
return 'darwin'
def get_extension(self):
sources = [
'src/_macosx.m',
'src/py_converters.cpp',
'src/path_cleanup.cpp'
]
ext = make_extension('matplotlib.backends._macosx', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
ext.extra_link_args.extend(['-framework', 'Cocoa'])
return ext
class Windowing(OptionalBackendPackage):
"""
Builds the windowing extension.
"""
name = "windowing"
def check_requirements(self):
if sys.platform != 'win32':
raise CheckFailed("Microsoft Windows only")
config = self.get_config()
if config is False:
raise CheckFailed("skipping due to configuration")
return ""
def get_extension(self):
sources = [
"src/_windowing.cpp"
]
ext = make_extension('matplotlib._windowing', sources)
ext.include_dirs.extend(['C:/include'])
ext.libraries.extend(['user32'])
ext.library_dirs.extend(['C:/lib'])
ext.extra_link_args.append("-mwindows")
return ext
class BackendQtBase(OptionalBackendPackage):
def convert_qt_version(self, version):
version = '%x' % version
temp = []
while len(version) > 0:
version, chunk = version[:-2], version[-2:]
temp.insert(0, str(int(chunk, 16)))
return '.'.join(temp)
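    # Illustrative sketch (hypothetical values, not from the original sources):
    # QtCore.QT_VERSION packs the version as 0xMMNNPP, so
    #     convert_qt_version(self, 0x050601)  ->  '5.6.1'
    # because '%x' % 0x050601 == '50601', which is split from the right into
    # the chunks '01', '06' and '5'.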
def check_requirements(self):
'''
If PyQt4/PyQt5 is already imported, importing PyQt5/PyQt4 will fail
so we need to test in a subprocess (as for Gtk3).
'''
try:
p = multiprocessing.Pool()
except:
            # Can't use multiprocessing; fall back to an in-process check
            # (this will fail if both PyQt4 and PyQt5 get imported).
try:
# Try in-process
msg = self.callback(self)
except RuntimeError:
raise CheckFailed("Could not import: are PyQt4 & PyQt5 both installed?")
except:
# Raise any other exceptions
raise
else:
# Multiprocessing OK
try:
res = p.map_async(self.callback, [self])
msg = res.get(timeout=10)[0]
except multiprocessing.TimeoutError:
p.terminate()
                # No result returned. Probably hanging; terminate the process.
raise CheckFailed("Check timed out")
except:
# Some other error.
p.close()
raise
else:
# Clean exit
p.close()
finally:
# Tidy up multiprocessing
p.join()
return msg
def backend_pyside_internal_check(self):
try:
from PySide import __version__
from PySide import QtCore
except ImportError:
raise CheckFailed("PySide not found")
else:
return ("Qt: %s, PySide: %s" %
(QtCore.__version__, __version__))
def backend_pyqt4_internal_check(self):
try:
from PyQt4 import QtCore
except ImportError:
raise CheckFailed("PyQt4 not found")
try:
qt_version = QtCore.QT_VERSION
pyqt_version_str = QtCore.QT_VERSION_STR
except AttributeError:
raise CheckFailed('PyQt4 not correctly imported')
else:
return ("Qt: %s, PyQt: %s" % (self.convert_qt_version(qt_version), pyqt_version_str))
def backend_qt4_internal_check(self):
successes = []
failures = []
try:
successes.append(backend_pyside_internal_check(self))
except CheckFailed as e:
failures.append(str(e))
try:
successes.append(backend_pyqt4_internal_check(self))
except CheckFailed as e:
failures.append(str(e))
if len(successes) == 0:
raise CheckFailed('; '.join(failures))
return '; '.join(successes + failures)
class BackendQt4(BackendQtBase):
name = "qt4agg"
def __init__(self, *args, **kwargs):
BackendQtBase.__init__(self, *args, **kwargs)
self.callback = backend_qt4_internal_check
def backend_qt5_internal_check(self):
try:
from PyQt5 import QtCore
except ImportError:
raise CheckFailed("PyQt5 not found")
try:
qt_version = QtCore.QT_VERSION
pyqt_version_str = QtCore.QT_VERSION_STR
except AttributeError:
raise CheckFailed('PyQt5 not correctly imported')
else:
return ("Qt: %s, PyQt: %s" % (self.convert_qt_version(qt_version), pyqt_version_str))
class BackendQt5(BackendQtBase):
name = "qt5agg"
def __init__(self, *args, **kwargs):
BackendQtBase.__init__(self, *args, **kwargs)
self.callback = backend_qt5_internal_check
class BackendCairo(OptionalBackendPackage):
name = "cairo"
def check_requirements(self):
try:
import cairocffi
except ImportError:
try:
import cairo
except ImportError:
raise CheckFailed("cairocffi or pycairo not found")
else:
return "pycairo version %s" % cairo.version
else:
return "cairocffi version %s" % cairocffi.version
class DviPng(SetupPackage):
name = "dvipng"
optional = True
def check(self):
try:
output = check_output('dvipng -version', shell=True,
stderr=subprocess.STDOUT)
return "version %s" % output.splitlines()[1].decode().split()[-1]
except (IndexError, ValueError, subprocess.CalledProcessError):
raise CheckFailed()
class Ghostscript(SetupPackage):
name = "ghostscript"
optional = True
def check(self):
try:
if sys.platform == 'win32':
command = 'gswin32c --version'
try:
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
command = 'gswin64c --version'
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
else:
command = 'gs --version'
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
return "version %s" % output.decode()[:-1]
except (IndexError, ValueError, subprocess.CalledProcessError):
raise CheckFailed()
class LaTeX(SetupPackage):
name = "latex"
optional = True
def check(self):
try:
output = check_output('latex -version', shell=True,
stderr=subprocess.STDOUT)
line = output.splitlines()[0].decode()
            pattern = r'(3\.1\d+)|(MiKTeX \d+\.\d+)'
match = re.search(pattern, line)
return "version %s" % match.group(0)
except (IndexError, ValueError, AttributeError, subprocess.CalledProcessError):
raise CheckFailed()
class PdfToPs(SetupPackage):
name = "pdftops"
optional = True
def check(self):
try:
output = check_output('pdftops -v', shell=True,
stderr=subprocess.STDOUT)
for line in output.splitlines():
line = line.decode()
if 'version' in line:
return "version %s" % line.split()[2]
except (IndexError, ValueError, subprocess.CalledProcessError):
pass
raise CheckFailed()
class OptionalPackageData(OptionalPackage):
config_category = "package_data"
class Dlls(OptionalPackageData):
"""
On Windows, this packages any DLL files that can be found in the
lib/matplotlib/* directories.
"""
name = "dlls"
def check_requirements(self):
if sys.platform != 'win32':
raise CheckFailed("Microsoft Windows only")
def get_package_data(self):
return {'': ['*.dll']}
@classmethod
def get_config(cls):
"""
Look at `setup.cfg` and return one of ["auto", True, False] indicating
if the package is at default state ("auto"), forced by the user (True)
or opted-out (False).
"""
try:
return config.getboolean(cls.config_category, cls.name)
except:
return False # <-- default
| mit |
Elizaveta239/PyDev.Debugger | pydevd_attach_to_process/winappdbg/util.py | 102 | 36223 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Miscellaneous utility classes and functions.
@group Helpers:
PathOperations,
MemoryAddresses,
CustomAddressIterator,
DataAddressIterator,
ImageAddressIterator,
MappedAddressIterator,
ExecutableAddressIterator,
ReadableAddressIterator,
WriteableAddressIterator,
ExecutableAndWriteableAddressIterator,
DebugRegister,
Regenerator,
BannerHelpFormatter,
StaticClass,
classproperty
"""
__revision__ = "$Id$"
__all__ = [
# Filename and pathname manipulation
'PathOperations',
# Memory address operations
'MemoryAddresses',
'CustomAddressIterator',
'DataAddressIterator',
'ImageAddressIterator',
'MappedAddressIterator',
'ExecutableAddressIterator',
'ReadableAddressIterator',
'WriteableAddressIterator',
'ExecutableAndWriteableAddressIterator',
# Debug registers manipulation
'DebugRegister',
# Miscellaneous
'Regenerator',
]
import sys
import os
import ctypes
import optparse
from winappdbg import win32
from winappdbg import compat
#==============================================================================
class classproperty(property):
"""
Class property method.
Only works for getting properties, if you set them
the symbol gets overwritten in the class namespace.
Inspired on: U{http://stackoverflow.com/a/7864317/426293}
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=""):
if fset is not None or fdel is not None:
raise NotImplementedError()
super(classproperty, self).__init__(fget=classmethod(fget), doc=doc)
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
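# Illustrative usage sketch (hypothetical names, not part of the original module):
#
#     class Config (object):
#         @classproperty
#         def banner(cls):
#             return "running %s" % cls.__name__
#
#     Config.banner      # -> 'running Config', no instance required
#     Config().banner    # also works through an instance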
class BannerHelpFormatter(optparse.IndentedHelpFormatter):
"Just a small tweak to optparse to be able to print a banner."
def __init__(self, banner, *argv, **argd):
self.banner = banner
optparse.IndentedHelpFormatter.__init__(self, *argv, **argd)
def format_usage(self, usage):
msg = optparse.IndentedHelpFormatter.format_usage(self, usage)
return '%s\n%s' % (self.banner, msg)
# See Process.generate_memory_snapshot()
class Regenerator(object):
"""
Calls a generator and iterates it. When it's finished iterating, the
generator is called again. This allows you to iterate a generator more
than once (well, sort of).
"""
def __init__(self, g_function, *v_args, **d_args):
"""
@type g_function: function
@param g_function: Function that when called returns a generator.
@type v_args: tuple
@param v_args: Variable arguments to pass to the generator function.
@type d_args: dict
        @param d_args: Keyword arguments to pass to the generator function.
"""
self.__g_function = g_function
self.__v_args = v_args
self.__d_args = d_args
self.__g_object = None
def __iter__(self):
'x.__iter__() <==> iter(x)'
return self
def next(self):
'x.next() -> the next value, or raise StopIteration'
if self.__g_object is None:
self.__g_object = self.__g_function( *self.__v_args, **self.__d_args )
try:
return self.__g_object.next()
except StopIteration:
self.__g_object = None
raise
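# Illustrative usage sketch (hypothetical names, not part of the original module):
#
#     def letters():
#         yield 'a'
#         yield 'b'
#
#     rgen = Regenerator(letters)
#     list(rgen)   # ['a', 'b']
#     list(rgen)   # ['a', 'b'] again; the generator is transparently recreated,
#                  # whereas a plain generator object would already be exhausted.
#
# Note that only the Python 2 iterator protocol (next) is implemented above; a
# __next__ alias would be needed for the same trick under Python 3.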
class StaticClass (object):
def __new__(cls, *argv, **argd):
"Don't try to instance this class, just use the static methods."
raise NotImplementedError(
"Cannot instance static class %s" % cls.__name__)
#==============================================================================
class PathOperations (StaticClass):
"""
Static methods for filename and pathname manipulation.
"""
@staticmethod
def path_is_relative(path):
"""
@see: L{path_is_absolute}
@type path: str
@param path: Absolute or relative path.
@rtype: bool
@return: C{True} if the path is relative, C{False} if it's absolute.
"""
return win32.PathIsRelative(path)
@staticmethod
def path_is_absolute(path):
"""
@see: L{path_is_relative}
@type path: str
@param path: Absolute or relative path.
@rtype: bool
@return: C{True} if the path is absolute, C{False} if it's relative.
"""
return not win32.PathIsRelative(path)
@staticmethod
def make_relative(path, current = None):
"""
@type path: str
@param path: Absolute path.
@type current: str
@param current: (Optional) Path to the current directory.
@rtype: str
@return: Relative path.
@raise WindowsError: It's impossible to make the path relative.
This happens when the path and the current path are not on the
same disk drive or network share.
"""
return win32.PathRelativePathTo(pszFrom = current, pszTo = path)
@staticmethod
def make_absolute(path):
"""
@type path: str
@param path: Relative path.
@rtype: str
@return: Absolute path.
"""
return win32.GetFullPathName(path)[0]
@staticmethod
def split_extension(pathname):
"""
@type pathname: str
@param pathname: Absolute path.
@rtype: tuple( str, str )
@return:
Tuple containing the file and extension components of the filename.
"""
filepart = win32.PathRemoveExtension(pathname)
extpart = win32.PathFindExtension(pathname)
return (filepart, extpart)
@staticmethod
def split_filename(pathname):
"""
@type pathname: str
@param pathname: Absolute path.
@rtype: tuple( str, str )
@return: Tuple containing the path to the file and the base filename.
"""
filepart = win32.PathFindFileName(pathname)
pathpart = win32.PathRemoveFileSpec(pathname)
return (pathpart, filepart)
@staticmethod
def split_path(path):
"""
@see: L{join_path}
@type path: str
@param path: Absolute or relative path.
@rtype: list( str... )
@return: List of path components.
"""
components = list()
while path:
next = win32.PathFindNextComponent(path)
if next:
prev = path[ : -len(next) ]
components.append(prev)
path = next
return components
@staticmethod
def join_path(*components):
"""
@see: L{split_path}
@type components: tuple( str... )
@param components: Path components.
@rtype: str
@return: Absolute or relative path.
"""
if components:
path = components[0]
for next in components[1:]:
path = win32.PathAppend(path, next)
else:
path = ""
return path
@staticmethod
def native_to_win32_pathname(name):
"""
@type name: str
@param name: Native (NT) absolute pathname.
@rtype: str
@return: Win32 absolute pathname.
"""
# XXX TODO
# There are probably some native paths that
# won't be converted by this naive approach.
if name.startswith(compat.b("\\")):
if name.startswith(compat.b("\\??\\")):
name = name[4:]
elif name.startswith(compat.b("\\SystemRoot\\")):
system_root_path = os.environ['SYSTEMROOT']
if system_root_path.endswith('\\'):
system_root_path = system_root_path[:-1]
name = system_root_path + name[11:]
else:
for drive_number in compat.xrange(ord('A'), ord('Z') + 1):
drive_letter = '%c:' % drive_number
try:
device_native_path = win32.QueryDosDevice(drive_letter)
except WindowsError:
e = sys.exc_info()[1]
if e.winerror in (win32.ERROR_FILE_NOT_FOUND, \
win32.ERROR_PATH_NOT_FOUND):
continue
raise
if not device_native_path.endswith(compat.b('\\')):
device_native_path += compat.b('\\')
if name.startswith(device_native_path):
name = drive_letter + compat.b('\\') + \
name[ len(device_native_path) : ]
break
return name
@staticmethod
def pathname_to_filename(pathname):
"""
        Equivalent to: C{PathOperations.split_filename(pathname)[1]}
@note: This function is preserved for backwards compatibility with
WinAppDbg 1.4 and earlier. It may be removed in future versions.
@type pathname: str
@param pathname: Absolute path to a file.
@rtype: str
@return: Filename component of the path.
"""
return win32.PathFindFileName(pathname)
#==============================================================================
class MemoryAddresses (StaticClass):
"""
Class to manipulate memory addresses.
@type pageSize: int
@cvar pageSize: Page size in bytes. Defaults to 0x1000 but it's
        automatically updated at runtime when the module is imported.
"""
@classproperty
def pageSize(cls):
"""
        Try to get the pageSize value at runtime.
"""
try:
try:
pageSize = win32.GetSystemInfo().dwPageSize
except WindowsError:
pageSize = 0x1000
except NameError:
pageSize = 0x1000
cls.pageSize = pageSize # now this function won't be called again
return pageSize
@classmethod
def align_address_to_page_start(cls, address):
"""
Align the given address to the start of the page it occupies.
@type address: int
@param address: Memory address.
@rtype: int
@return: Aligned memory address.
"""
return address - ( address % cls.pageSize )
@classmethod
def align_address_to_page_end(cls, address):
"""
Align the given address to the end of the page it occupies.
That is, to point to the start of the next page.
@type address: int
@param address: Memory address.
@rtype: int
@return: Aligned memory address.
"""
return address + cls.pageSize - ( address % cls.pageSize )
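    # Illustrative sketch (assuming the usual 0x1000 page size, not from the
    # original sources):
    #     align_address_to_page_start(0x12345)  ->  0x12000
    #     align_address_to_page_end(0x12345)    ->  0x13000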
@classmethod
def align_address_range(cls, begin, end):
"""
Align the given address range to the start and end of the page(s) it occupies.
@type begin: int
@param begin: Memory address of the beginning of the buffer.
Use C{None} for the first legal address in the address space.
@type end: int
@param end: Memory address of the end of the buffer.
Use C{None} for the last legal address in the address space.
@rtype: tuple( int, int )
@return: Aligned memory addresses.
"""
if begin is None:
begin = 0
if end is None:
end = win32.LPVOID(-1).value # XXX HACK
if end < begin:
begin, end = end, begin
begin = cls.align_address_to_page_start(begin)
if end != cls.align_address_to_page_start(end):
end = cls.align_address_to_page_end(end)
return (begin, end)
@classmethod
def get_buffer_size_in_pages(cls, address, size):
"""
Get the number of pages in use by the given buffer.
@type address: int
@param address: Aligned memory address.
@type size: int
@param size: Buffer size.
@rtype: int
@return: Buffer size in number of pages.
"""
if size < 0:
size = -size
address = address - size
begin, end = cls.align_address_range(address, address + size)
# XXX FIXME
# I think this rounding fails at least for address 0xFFFFFFFF size 1
return int(float(end - begin) / float(cls.pageSize))
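    # Illustrative sketch (assuming a 0x1000 page size, not from the original
    # sources): a 1-byte buffer at 0x10FFF fits in one page, while a 2-byte
    # buffer at the same address straddles a page boundary and needs two:
    #     get_buffer_size_in_pages(0x10FFF, 1)  ->  1
    #     get_buffer_size_in_pages(0x10FFF, 2)  ->  2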
@staticmethod
def do_ranges_intersect(begin, end, old_begin, old_end):
"""
Determine if the two given memory address ranges intersect.
@type begin: int
@param begin: Start address of the first range.
@type end: int
@param end: End address of the first range.
@type old_begin: int
@param old_begin: Start address of the second range.
@type old_end: int
@param old_end: End address of the second range.
@rtype: bool
@return: C{True} if the two ranges intersect, C{False} otherwise.
"""
return (old_begin <= begin < old_end) or \
(old_begin < end <= old_end) or \
(begin <= old_begin < end) or \
(begin < old_end <= end)
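    # Illustrative sketch (hypothetical values, not from the original sources):
    #     do_ranges_intersect(0x1000, 0x2000, 0x1800, 0x3000)  ->  True
    #     do_ranges_intersect(0x1000, 0x2000, 0x2000, 0x3000)  ->  False  (the ranges only touch)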
#==============================================================================
def CustomAddressIterator(memory_map, condition):
"""
Generator function that iterates through a memory map, filtering memory
region blocks by any given condition.
@type memory_map: list( L{win32.MemoryBasicInformation} )
@param memory_map: List of memory region information objects.
Returned by L{Process.get_memory_map}.
@type condition: function
@param condition: Callback function that returns C{True} if the memory
block should be returned, or C{False} if it should be filtered.
@rtype: generator of L{win32.MemoryBasicInformation}
@return: Generator object to iterate memory blocks.
"""
for mbi in memory_map:
if condition(mbi):
address = mbi.BaseAddress
max_addr = address + mbi.RegionSize
while address < max_addr:
yield address
address = address + 1
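# Illustrative usage sketch (hypothetical names, not part of the original module):
#
#     memory_map = process.get_memory_map()      # some Process instance is assumed
#     for address in CustomAddressIterator(memory_map,
#                                          win32.MemoryBasicInformation.is_readable):
#         ...                                    # one yield per readable address
#
# Note that the iterator yields the individual addresses inside each matching
# region, not the region objects themselves.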
def DataAddressIterator(memory_map):
"""
Generator function that iterates through a memory map, returning only those
memory blocks that contain data.
@type memory_map: list( L{win32.MemoryBasicInformation} )
@param memory_map: List of memory region information objects.
Returned by L{Process.get_memory_map}.
@rtype: generator of L{win32.MemoryBasicInformation}
@return: Generator object to iterate memory blocks.
"""
return CustomAddressIterator(memory_map,
win32.MemoryBasicInformation.has_content)
def ImageAddressIterator(memory_map):
"""
Generator function that iterates through a memory map, returning only those
memory blocks that belong to executable images.
@type memory_map: list( L{win32.MemoryBasicInformation} )
@param memory_map: List of memory region information objects.
Returned by L{Process.get_memory_map}.
@rtype: generator of L{win32.MemoryBasicInformation}
@return: Generator object to iterate memory blocks.
"""
return CustomAddressIterator(memory_map,
win32.MemoryBasicInformation.is_image)
def MappedAddressIterator(memory_map):
"""
Generator function that iterates through a memory map, returning only those
memory blocks that belong to memory mapped files.
@type memory_map: list( L{win32.MemoryBasicInformation} )
@param memory_map: List of memory region information objects.
Returned by L{Process.get_memory_map}.
@rtype: generator of L{win32.MemoryBasicInformation}
@return: Generator object to iterate memory blocks.
"""
return CustomAddressIterator(memory_map,
win32.MemoryBasicInformation.is_mapped)
def ReadableAddressIterator(memory_map):
"""
Generator function that iterates through a memory map, returning only those
memory blocks that are readable.
@type memory_map: list( L{win32.MemoryBasicInformation} )
@param memory_map: List of memory region information objects.
Returned by L{Process.get_memory_map}.
@rtype: generator of L{win32.MemoryBasicInformation}
@return: Generator object to iterate memory blocks.
"""
return CustomAddressIterator(memory_map,
win32.MemoryBasicInformation.is_readable)
def WriteableAddressIterator(memory_map):
"""
Generator function that iterates through a memory map, returning only those
memory blocks that are writeable.
@note: Writeable memory is always readable too.
@type memory_map: list( L{win32.MemoryBasicInformation} )
@param memory_map: List of memory region information objects.
Returned by L{Process.get_memory_map}.
@rtype: generator of L{win32.MemoryBasicInformation}
@return: Generator object to iterate memory blocks.
"""
return CustomAddressIterator(memory_map,
win32.MemoryBasicInformation.is_writeable)
def ExecutableAddressIterator(memory_map):
"""
Generator function that iterates through a memory map, returning only those
memory blocks that are executable.
@note: Executable memory is always readable too.
@type memory_map: list( L{win32.MemoryBasicInformation} )
@param memory_map: List of memory region information objects.
Returned by L{Process.get_memory_map}.
@rtype: generator of L{win32.MemoryBasicInformation}
@return: Generator object to iterate memory blocks.
"""
return CustomAddressIterator(memory_map,
win32.MemoryBasicInformation.is_executable)
def ExecutableAndWriteableAddressIterator(memory_map):
"""
Generator function that iterates through a memory map, returning only those
memory blocks that are executable and writeable.
@note: The presence of such pages make memory corruption vulnerabilities
much easier to exploit.
@type memory_map: list( L{win32.MemoryBasicInformation} )
@param memory_map: List of memory region information objects.
Returned by L{Process.get_memory_map}.
@rtype: generator of L{win32.MemoryBasicInformation}
@return: Generator object to iterate memory blocks.
"""
return CustomAddressIterator(memory_map,
win32.MemoryBasicInformation.is_executable_and_writeable)
#==============================================================================
try:
_registerMask = win32.SIZE_T(-1).value
except TypeError:
if win32.SIZEOF(win32.SIZE_T) == 4:
_registerMask = 0xFFFFFFFF
elif win32.SIZEOF(win32.SIZE_T) == 8:
_registerMask = 0xFFFFFFFFFFFFFFFF
else:
raise
class DebugRegister (StaticClass):
"""
Class to manipulate debug registers.
Used by L{HardwareBreakpoint}.
@group Trigger flags used by HardwareBreakpoint:
BREAK_ON_EXECUTION, BREAK_ON_WRITE, BREAK_ON_ACCESS, BREAK_ON_IO_ACCESS
@group Size flags used by HardwareBreakpoint:
WATCH_BYTE, WATCH_WORD, WATCH_DWORD, WATCH_QWORD
@group Bitwise masks for Dr7:
enableMask, disableMask, triggerMask, watchMask, clearMask,
generalDetectMask
@group Bitwise masks for Dr6:
hitMask, hitMaskAll, debugAccessMask, singleStepMask, taskSwitchMask,
clearDr6Mask, clearHitMask
@group Debug control MSR definitions:
DebugCtlMSR, LastBranchRecord, BranchTrapFlag, PinControl,
LastBranchToIP, LastBranchFromIP,
LastExceptionToIP, LastExceptionFromIP
@type BREAK_ON_EXECUTION: int
@cvar BREAK_ON_EXECUTION: Break on execution.
@type BREAK_ON_WRITE: int
@cvar BREAK_ON_WRITE: Break on write.
@type BREAK_ON_ACCESS: int
@cvar BREAK_ON_ACCESS: Break on read or write.
@type BREAK_ON_IO_ACCESS: int
@cvar BREAK_ON_IO_ACCESS: Break on I/O port access.
Not supported by any hardware.
@type WATCH_BYTE: int
@cvar WATCH_BYTE: Watch a byte.
@type WATCH_WORD: int
@cvar WATCH_WORD: Watch a word.
@type WATCH_DWORD: int
@cvar WATCH_DWORD: Watch a double word.
@type WATCH_QWORD: int
@cvar WATCH_QWORD: Watch one quad word.
@type enableMask: 4-tuple of integers
@cvar enableMask:
Enable bit on C{Dr7} for each slot.
Works as a bitwise-OR mask.
@type disableMask: 4-tuple of integers
@cvar disableMask:
Mask of the enable bit on C{Dr7} for each slot.
Works as a bitwise-AND mask.
@type triggerMask: 4-tuple of 2-tuples of integers
@cvar triggerMask:
Trigger bits on C{Dr7} for each trigger flag value.
Each 2-tuple has the bitwise-OR mask and the bitwise-AND mask.
@type watchMask: 4-tuple of 2-tuples of integers
@cvar watchMask:
Watch bits on C{Dr7} for each watch flag value.
Each 2-tuple has the bitwise-OR mask and the bitwise-AND mask.
@type clearMask: 4-tuple of integers
@cvar clearMask:
Mask of all important bits on C{Dr7} for each slot.
Works as a bitwise-AND mask.
@type generalDetectMask: integer
@cvar generalDetectMask:
General detect mode bit. It enables the processor to notify the
debugger when the debugee is trying to access one of the debug
registers.
@type hitMask: 4-tuple of integers
@cvar hitMask:
Hit bit on C{Dr6} for each slot.
Works as a bitwise-AND mask.
@type hitMaskAll: integer
@cvar hitMaskAll:
Bitmask for all hit bits in C{Dr6}. Useful to know if at least one
hardware breakpoint was hit, or to clear the hit bits only.
@type clearHitMask: integer
@cvar clearHitMask:
Bitmask to clear all the hit bits in C{Dr6}.
@type debugAccessMask: integer
@cvar debugAccessMask:
The debugee tried to access a debug register. Needs bit
L{generalDetectMask} enabled in C{Dr7}.
@type singleStepMask: integer
@cvar singleStepMask:
A single step exception was raised. Needs the trap flag enabled.
@type taskSwitchMask: integer
@cvar taskSwitchMask:
A task switch has occurred. Needs the TSS T-bit set to 1.
@type clearDr6Mask: integer
@cvar clearDr6Mask:
Bitmask to clear all meaningful bits in C{Dr6}.
"""
BREAK_ON_EXECUTION = 0
BREAK_ON_WRITE = 1
BREAK_ON_ACCESS = 3
BREAK_ON_IO_ACCESS = 2
WATCH_BYTE = 0
WATCH_WORD = 1
WATCH_DWORD = 3
WATCH_QWORD = 2
registerMask = _registerMask
#------------------------------------------------------------------------------
###########################################################################
# http://en.wikipedia.org/wiki/Debug_register
#
# DR7 - Debug control
#
# The low-order eight bits of DR7 (0,2,4,6 and 1,3,5,7) selectively enable
# the four address breakpoint conditions. There are two levels of enabling:
# the local (0,2,4,6) and global (1,3,5,7) levels. The local enable bits
# are automatically reset by the processor at every task switch to avoid
# unwanted breakpoint conditions in the new task. The global enable bits
# are not reset by a task switch; therefore, they can be used for
# conditions that are global to all tasks.
#
# Bits 16-17 (DR0), 20-21 (DR1), 24-25 (DR2), 28-29 (DR3), define when
# breakpoints trigger. Each breakpoint has a two-bit entry that specifies
# whether they break on execution (00b), data write (01b), data read or
# write (11b). 10b is defined to mean break on IO read or write but no
# hardware supports it. Bits 18-19 (DR0), 22-23 (DR1), 26-27 (DR2), 30-31
# (DR3), define how large area of memory is watched by breakpoints. Again
# each breakpoint has a two-bit entry that specifies whether they watch
# one (00b), two (01b), eight (10b) or four (11b) bytes.
###########################################################################
# Dr7 |= enableMask[register]
enableMask = (
1 << 0, # Dr0 (bit 0)
1 << 2, # Dr1 (bit 2)
1 << 4, # Dr2 (bit 4)
1 << 6, # Dr3 (bit 6)
)
# Dr7 &= disableMask[register]
    # Use the module-level _registerMask here: in Python 3 the class attribute
    # registerMask is not visible inside the list comprehension's scope.
    disableMask = tuple( [_registerMask ^ x for x in enableMask] )
    try:
        del x   # The comprehension variable only leaks into the class body on Python 2
    except:
        pass
# orMask, andMask = triggerMask[register][trigger]
# Dr7 = (Dr7 & andMask) | orMask # to set
# Dr7 = Dr7 & andMask # to remove
triggerMask = (
# Dr0 (bits 16-17)
(
((0 << 16), (3 << 16) ^ registerMask), # execute
((1 << 16), (3 << 16) ^ registerMask), # write
((2 << 16), (3 << 16) ^ registerMask), # io read
((3 << 16), (3 << 16) ^ registerMask), # access
),
# Dr1 (bits 20-21)
(
((0 << 20), (3 << 20) ^ registerMask), # execute
((1 << 20), (3 << 20) ^ registerMask), # write
((2 << 20), (3 << 20) ^ registerMask), # io read
((3 << 20), (3 << 20) ^ registerMask), # access
),
# Dr2 (bits 24-25)
(
((0 << 24), (3 << 24) ^ registerMask), # execute
((1 << 24), (3 << 24) ^ registerMask), # write
((2 << 24), (3 << 24) ^ registerMask), # io read
((3 << 24), (3 << 24) ^ registerMask), # access
),
# Dr3 (bits 28-29)
(
((0 << 28), (3 << 28) ^ registerMask), # execute
((1 << 28), (3 << 28) ^ registerMask), # write
((2 << 28), (3 << 28) ^ registerMask), # io read
((3 << 28), (3 << 28) ^ registerMask), # access
),
)
# orMask, andMask = watchMask[register][watch]
# Dr7 = (Dr7 & andMask) | orMask # to set
# Dr7 = Dr7 & andMask # to remove
watchMask = (
# Dr0 (bits 18-19)
(
((0 << 18), (3 << 18) ^ registerMask), # byte
((1 << 18), (3 << 18) ^ registerMask), # word
((2 << 18), (3 << 18) ^ registerMask), # qword
((3 << 18), (3 << 18) ^ registerMask), # dword
),
# Dr1 (bits 22-23)
(
            ((0 << 22), (3 << 22) ^ registerMask), # byte
            ((1 << 22), (3 << 22) ^ registerMask), # word
            ((2 << 22), (3 << 22) ^ registerMask), # qword
            ((3 << 22), (3 << 22) ^ registerMask), # dword
),
# Dr2 (bits 26-27)
(
((0 << 26), (3 << 26) ^ registerMask), # byte
((1 << 26), (3 << 26) ^ registerMask), # word
((2 << 26), (3 << 26) ^ registerMask), # qword
((3 << 26), (3 << 26) ^ registerMask), # dword
),
# Dr3 (bits 30-31)
(
            ((0 << 30), (3 << 30) ^ registerMask), # byte
            ((1 << 30), (3 << 30) ^ registerMask), # word
            ((2 << 30), (3 << 30) ^ registerMask), # qword
            ((3 << 30), (3 << 30) ^ registerMask), # dword
),
)
# Dr7 = Dr7 & clearMask[register]
clearMask = (
registerMask ^ ( (1 << 0) + (3 << 16) + (3 << 18) ), # Dr0
registerMask ^ ( (1 << 2) + (3 << 20) + (3 << 22) ), # Dr1
registerMask ^ ( (1 << 4) + (3 << 24) + (3 << 26) ), # Dr2
registerMask ^ ( (1 << 6) + (3 << 28) + (3 << 30) ), # Dr3
)
# Dr7 = Dr7 | generalDetectMask
generalDetectMask = (1 << 13)
###########################################################################
# http://en.wikipedia.org/wiki/Debug_register
#
# DR6 - Debug status
#
# The debug status register permits the debugger to determine which debug
# conditions have occurred. When the processor detects an enabled debug
# exception, it sets the low-order bits of this register (0,1,2,3) before
# entering the debug exception handler.
#
# Note that the bits of DR6 are never cleared by the processor. To avoid
# any confusion in identifying the next debug exception, the debug handler
# should move zeros to DR6 immediately before returning.
###########################################################################
# bool(Dr6 & hitMask[register])
hitMask = (
(1 << 0), # Dr0
(1 << 1), # Dr1
(1 << 2), # Dr2
(1 << 3), # Dr3
)
# bool(Dr6 & anyHitMask)
hitMaskAll = hitMask[0] | hitMask[1] | hitMask[2] | hitMask[3]
# Dr6 = Dr6 & clearHitMask
clearHitMask = registerMask ^ hitMaskAll
# bool(Dr6 & debugAccessMask)
debugAccessMask = (1 << 13)
# bool(Dr6 & singleStepMask)
singleStepMask = (1 << 14)
# bool(Dr6 & taskSwitchMask)
taskSwitchMask = (1 << 15)
# Dr6 = Dr6 & clearDr6Mask
clearDr6Mask = registerMask ^ (hitMaskAll | \
debugAccessMask | singleStepMask | taskSwitchMask)
#------------------------------------------------------------------------------
###############################################################################
#
# (from the AMD64 manuals)
#
# The fields within the DebugCtlMSR register are:
#
# Last-Branch Record (LBR) - Bit 0, read/write. Software sets this bit to 1
# to cause the processor to record the source and target addresses of the
# last control transfer taken before a debug exception occurs. The recorded
# control transfers include branch instructions, interrupts, and exceptions.
#
# Branch Single Step (BTF) - Bit 1, read/write. Software uses this bit to
# change the behavior of the rFLAGS.TF bit. When this bit is cleared to 0,
# the rFLAGS.TF bit controls instruction single stepping, (normal behavior).
# When this bit is set to 1, the rFLAGS.TF bit controls single stepping on
# control transfers. The single-stepped control transfers include branch
# instructions, interrupts, and exceptions. Control-transfer single stepping
# requires both BTF=1 and rFLAGS.TF=1.
#
# Performance-Monitoring/Breakpoint Pin-Control (PBi) - Bits 5-2, read/write.
# Software uses these bits to control the type of information reported by
# the four external performance-monitoring/breakpoint pins on the processor.
# When a PBi bit is cleared to 0, the corresponding external pin (BPi)
# reports performance-monitor information. When a PBi bit is set to 1, the
# corresponding external pin (BPi) reports breakpoint information.
#
# All remaining bits in the DebugCtlMSR register are reserved.
#
# Software can enable control-transfer single stepping by setting
# DebugCtlMSR.BTF to 1 and rFLAGS.TF to 1. The processor automatically
# disables control-transfer single stepping when a debug exception (#DB)
# occurs by clearing DebugCtlMSR.BTF to 0. rFLAGS.TF is also cleared when a
# #DB exception occurs. Before exiting the debug-exception handler, software
# must set both DebugCtlMSR.BTF and rFLAGS.TF to 1 to restart single
# stepping.
#
###############################################################################
DebugCtlMSR = 0x1D9
LastBranchRecord = (1 << 0)
BranchTrapFlag = (1 << 1)
PinControl = (
(1 << 2), # PB1
(1 << 3), # PB2
(1 << 4), # PB3
(1 << 5), # PB4
)
###############################################################################
#
# (from the AMD64 manuals)
#
# Control-transfer recording MSRs: LastBranchToIP, LastBranchFromIP,
# LastExceptionToIP, and LastExceptionFromIP. These registers are loaded
# automatically by the processor when the DebugCtlMSR.LBR bit is set to 1.
# These MSRs are read-only.
#
# The processor automatically disables control-transfer recording when a
# debug exception (#DB) occurs by clearing DebugCtlMSR.LBR to 0. The
# contents of the control-transfer recording MSRs are not altered by the
# processor when the #DB occurs. Before exiting the debug-exception handler,
# software can set DebugCtlMSR.LBR to 1 to re-enable the recording mechanism.
#
###############################################################################
LastBranchToIP = 0x1DC
LastBranchFromIP = 0x1DB
LastExceptionToIP = 0x1DE
LastExceptionFromIP = 0x1DD
#------------------------------------------------------------------------------
@classmethod
def clear_bp(cls, ctx, register):
"""
Clears a hardware breakpoint.
@see: find_slot, set_bp
@type ctx: dict( str S{->} int )
@param ctx: Thread context dictionary.
@type register: int
@param register: Slot (debug register) for hardware breakpoint.
"""
ctx['Dr7'] &= cls.clearMask[register]
ctx['Dr%d' % register] = 0
@classmethod
def set_bp(cls, ctx, register, address, trigger, watch):
"""
Sets a hardware breakpoint.
@see: clear_bp, find_slot
@type ctx: dict( str S{->} int )
@param ctx: Thread context dictionary.
@type register: int
@param register: Slot (debug register).
@type address: int
@param address: Memory address.
@type trigger: int
@param trigger: Trigger flag. See L{HardwareBreakpoint.validTriggers}.
@type watch: int
@param watch: Watch flag. See L{HardwareBreakpoint.validWatchSizes}.
"""
Dr7 = ctx['Dr7']
Dr7 |= cls.enableMask[register]
orMask, andMask = cls.triggerMask[register][trigger]
Dr7 &= andMask
Dr7 |= orMask
orMask, andMask = cls.watchMask[register][watch]
Dr7 &= andMask
Dr7 |= orMask
ctx['Dr7'] = Dr7
ctx['Dr%d' % register] = address
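    # Illustrative sketch (hypothetical context values, not from the original
    # sources): a 4-byte write watchpoint on slot 0 at address 0xDEADBEEF.
    #
    #     ctx = {'Dr0': 0, 'Dr1': 0, 'Dr2': 0, 'Dr3': 0, 'Dr6': 0, 'Dr7': 0}
    #     DebugRegister.set_bp(ctx, 0, 0xDEADBEEF,
    #                          DebugRegister.BREAK_ON_WRITE,
    #                          DebugRegister.WATCH_DWORD)
    #     # ctx['Dr7'] == 0xD0001: bit 0 (local enable for Dr0),
    #     # bits 16-17 == 01b (break on write), bits 18-19 == 11b (4-byte watch)
    #     DebugRegister.clear_bp(ctx, 0)         # undoes the change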
@classmethod
def find_slot(cls, ctx):
"""
Finds an empty slot to set a hardware breakpoint.
@see: clear_bp, set_bp
@type ctx: dict( str S{->} int )
@param ctx: Thread context dictionary.
@rtype: int
@return: Slot (debug register) for hardware breakpoint.
"""
Dr7 = ctx['Dr7']
slot = 0
for m in cls.enableMask:
if (Dr7 & m) == 0:
return slot
slot += 1
return None
| epl-1.0 |
mrrrgn/AutobahnPython | examples/twisted/websocket/broadcast/client.py | 18 | 1802 | ###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
class BroadcastClientProtocol(WebSocketClientProtocol):
"""
    Simple client that connects to a WebSocket server, sends a HELLO
    message every 2 seconds and prints everything it receives.
"""
def sendHello(self):
self.sendMessage("Hello from Python!".encode('utf8'))
reactor.callLater(2, self.sendHello)
def onOpen(self):
self.sendHello()
def onMessage(self, payload, isBinary):
if not isBinary:
print("Text message received: {}".format(payload.decode('utf8')))
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Need the WebSocket server address, i.e. ws://localhost:9000")
sys.exit(1)
factory = WebSocketClientFactory(sys.argv[1])
factory.protocol = BroadcastClientProtocol
connectWS(factory)
reactor.run()
| apache-2.0 |
Heathckliff/SU2 | SU2_PY/patient_designspace.py | 3 | 3679 | #!/usr/bin/env python
## \file patient_designspace.py
# \brief Python script for running multiple design configurations in multiple sessions
# \author T. Lukaczyk
# \version 4.0.1 "Cardinal"
#
# SU2 Lead Developers: Dr. Francisco Palacios ([email protected]).
# Dr. Thomas D. Economon ([email protected]).
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
#
# Copyright (C) 2012-2015 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
import os, sys, numpy, time, shutil, glob, traceback
from optparse import OptionParser
sys.path.append(os.environ['SU2_RUN'])
import SU2
# human readable time stamper
pretty_time = lambda: time.asctime( time.localtime(time.time()))
# -------------------------------------------------------------------
# Main
# -------------------------------------------------------------------
def main():
# Command Line Options
parser = OptionParser()
parser.add_option("-c", "--config", dest="config_file",
help="read config data from FILE", metavar="FILE")
parser.add_option("-d", "--design", dest="design_file",
help="read design data from FILE", metavar="FILE")
parser.add_option("-p", "--project", dest="project_file",
help="read project data from FILE", metavar="FILE")
parser.add_option("-t", "--transfer", dest="transfer_file",
help="read transfer data from FILE", metavar="FILE")
parser.add_option("-e", "--exchange", dest="exchange_location", default='',
help="optional, SERVER_AND_FOLDER where transfer file can be pulled, example: user@this_server.web:/folder/location/", metavar="SERVER_AND_FOLDER")
parser.add_option("-s", "--hot_start", dest="hot_start", default="False",
help="optional, HOT_START don't initialize design as waiting", metavar="HOT_START")
(options, args)=parser.parse_args()
options.hot_start = options.hot_start == 'True'
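    # Illustrative invocation (hypothetical file names, not from the original
    # sources):
    #     patient_designspace.py -c config.cfg -d design.pkl \
    #                            -p project.pkl -t transfer.pkl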
# Sample Design Space
SU2.opt.server ( options.config_file ,
options.design_file ,
options.project_file ,
options.transfer_file ,
options.exchange_location ,
options.hot_start )
return
#: def main()
# -------------------------------------------------------------------
# Run Main Program
# -------------------------------------------------------------------
# this is only accessed if running from command prompt
if __name__ == '__main__':
main()
| lgpl-2.1 |
edx/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/formtools/tests.py | 44 | 9574 | import unittest
from django import forms
from django.conf import settings
from django.contrib.formtools import preview, wizard, utils
from django import http
from django.test import TestCase
success_string = "Done was called!"
class TestFormPreview(preview.FormPreview):
def done(self, request, cleaned_data):
return http.HttpResponse(success_string)
class TestForm(forms.Form):
field1 = forms.CharField()
field1_ = forms.CharField()
bool1 = forms.BooleanField(required=False)
class PreviewTests(TestCase):
urls = 'django.contrib.formtools.test_urls'
def setUp(self):
# Create a FormPreview instance to share between tests
self.preview = preview.FormPreview(TestForm)
input_template = '<input type="hidden" name="%s" value="%s" />'
self.input = input_template % (self.preview.unused_name('stage'), "%d")
self.test_data = {'field1':u'foo', 'field1_':u'asdf'}
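    # Illustrative note (not from the original tests): after setUp() above,
    # self.input is '<input type="hidden" name="stage" value="%d" />' (assuming
    # no form field is already named "stage"); the tests below fill in the
    # expected stage number via the remaining %d placeholder.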
def test_unused_name(self):
"""
        Verifies name mangling to get a unique field name.
"""
self.assertEqual(self.preview.unused_name('field1'), 'field1__')
def test_form_get(self):
"""
Test contrib.formtools.preview form retrieval.
        Use the client library to see if we can successfully retrieve
        the form (mostly testing that the ROOT_URLCONF setup
        works). Verify that an additional hidden input field
        is created to manage the stage.
"""
response = self.client.get('/test1/')
stage = self.input % 1
self.assertContains(response, stage, 1)
def test_form_preview(self):
"""
Test contrib.formtools.preview form preview rendering.
Use the client library to POST to the form to see if a preview
is returned. If we do get a form back check that the hidden
value is correctly managing the state of the form.
"""
        # Pass strings for form submittal and add a stage variable to
        # show we previously saw the first stage of the form.
self.test_data.update({'stage': 1})
response = self.client.post('/test1/', self.test_data)
# Check to confirm stage is set to 2 in output form.
stage = self.input % 2
self.assertContains(response, stage, 1)
def test_form_submit(self):
"""
Test contrib.formtools.preview form submittal.
Use the client library to POST to the form with stage set to 3
        to see if our form's done() method is called. Check first
        without the security hash and verify failure, then retry with the
        security hash and verify success.
"""
        # Pass strings for form submittal and add a stage variable to
        # show we previously saw the first stage of the form.
self.test_data.update({'stage':2})
response = self.client.post('/test1/', self.test_data)
self.failIfEqual(response.content, success_string)
hash = self.preview.security_hash(None, TestForm(self.test_data))
self.test_data.update({'hash': hash})
response = self.client.post('/test1/', self.test_data)
self.assertEqual(response.content, success_string)
def test_bool_submit(self):
"""
Test contrib.formtools.preview form submittal when form contains:
BooleanField(required=False)
Ticket: #6209 - When an unchecked BooleanField is previewed, the preview
form's hash would be computed with no value for ``bool1``. However, when
the preview form is rendered, the unchecked hidden BooleanField would be
rendered with the string value 'False'. So when the preview form is
resubmitted, the hash would be computed with the value 'False' for
``bool1``. We need to make sure the hashes are the same in both cases.
"""
self.test_data.update({'stage':2})
hash = self.preview.security_hash(None, TestForm(self.test_data))
self.test_data.update({'hash':hash, 'bool1':u'False'})
response = self.client.post('/test1/', self.test_data)
self.assertEqual(response.content, success_string)
class SecurityHashTests(unittest.TestCase):
def test_textfield_hash(self):
"""
Regression test for #10034: the hash generation function should ignore
leading/trailing whitespace so as to be friendly to broken browsers that
submit it (usually in textareas).
"""
f1 = HashTestForm({'name': 'joe', 'bio': 'Nothing notable.'})
f2 = HashTestForm({'name': ' joe', 'bio': 'Nothing notable. '})
hash1 = utils.security_hash(None, f1)
hash2 = utils.security_hash(None, f2)
self.assertEqual(hash1, hash2)
def test_empty_permitted(self):
"""
Regression test for #10643: the security hash should allow forms with
empty_permitted = True, or forms where data has not changed.
"""
f1 = HashTestBlankForm({})
f2 = HashTestForm({}, empty_permitted=True)
hash1 = utils.security_hash(None, f1)
hash2 = utils.security_hash(None, f2)
self.assertEqual(hash1, hash2)
class HashTestForm(forms.Form):
name = forms.CharField()
bio = forms.CharField()
class HashTestBlankForm(forms.Form):
name = forms.CharField(required=False)
bio = forms.CharField(required=False)
#
# FormWizard tests
#
class WizardPageOneForm(forms.Form):
field = forms.CharField()
class WizardPageTwoForm(forms.Form):
field = forms.CharField()
class WizardPageTwoAlternativeForm(forms.Form):
field = forms.CharField()
class WizardPageThreeForm(forms.Form):
field = forms.CharField()
class WizardClass(wizard.FormWizard):
def render_template(self, *args, **kw):
return http.HttpResponse("")
def done(self, request, cleaned_data):
return http.HttpResponse(success_string)
class DummyRequest(http.HttpRequest):
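    # Minimal HttpRequest stand-in: reports POST when a POST dict is given,
    # GET otherwise, and opts out of CSRF enforcement for these tests.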
def __init__(self, POST=None):
super(DummyRequest, self).__init__()
self.method = POST and "POST" or "GET"
if POST is not None:
self.POST.update(POST)
self._dont_enforce_csrf_checks = True
class WizardTests(TestCase):
def setUp(self):
# Use a known SECRET_KEY to make security_hash tests deterministic
self.old_SECRET_KEY = settings.SECRET_KEY
settings.SECRET_KEY = "123"
def tearDown(self):
settings.SECRET_KEY = self.old_SECRET_KEY
def test_step_starts_at_zero(self):
"""
step should be zero for the first form
"""
wizard = WizardClass([WizardPageOneForm, WizardPageTwoForm])
request = DummyRequest()
wizard(request)
self.assertEquals(0, wizard.step)
def test_step_increments(self):
"""
step should be incremented when we go to the next page
"""
wizard = WizardClass([WizardPageOneForm, WizardPageTwoForm])
request = DummyRequest(POST={"0-field":"test", "wizard_step":"0"})
response = wizard(request)
self.assertEquals(1, wizard.step)
def test_14498(self):
"""
Regression test for ticket #14498. All previous steps' forms should be
validated.
"""
that = self
reached = [False]
class WizardWithProcessStep(WizardClass):
def process_step(self, request, form, step):
reached[0] = True
that.assertTrue(hasattr(form, 'cleaned_data'))
wizard = WizardWithProcessStep([WizardPageOneForm,
WizardPageTwoForm,
WizardPageThreeForm])
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "2fdbefd4c0cad51509478fbacddf8b13",
"wizard_step": "1"}
wizard(DummyRequest(POST=data))
self.assertTrue(reached[0])
def test_14576(self):
"""
Regression test for ticket #14576.
The form of the last step is not passed to the done method.
"""
reached = [False]
that = self
class Wizard(WizardClass):
def done(self, request, form_list):
reached[0] = True
that.assertTrue(len(form_list) == 2)
wizard = Wizard([WizardPageOneForm,
WizardPageTwoForm])
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "2fdbefd4c0cad51509478fbacddf8b13",
"wizard_step": "1"}
wizard(DummyRequest(POST=data))
self.assertTrue(reached[0])
def test_15075(self):
"""
Regression test for ticket #15075. Allow modifying wizard's form_list
in process_step.
"""
reached = [False]
that = self
class WizardWithProcessStep(WizardClass):
def process_step(self, request, form, step):
if step == 0:
self.form_list[1] = WizardPageTwoAlternativeForm
if step == 1:
that.assertTrue(isinstance(form, WizardPageTwoAlternativeForm))
reached[0] = True
wizard = WizardWithProcessStep([WizardPageOneForm,
WizardPageTwoForm,
WizardPageThreeForm])
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "2fdbefd4c0cad51509478fbacddf8b13",
"wizard_step": "1"}
wizard(DummyRequest(POST=data))
self.assertTrue(reached[0])
| gpl-3.0 |
Tesla-Redux/android_external_skia | tools/tests/bench_pictures_cfg_test.py | 155 | 1340 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that the bench_pictures.cfg file is sane.
"""
import os
import sys
def ThrowIfNotAString(obj):
""" Raise a TypeError if obj is not a string. """
if str(obj) != obj:
raise TypeError('%s is not a string!' % str(obj))
def Main(argv):
""" Verify that the bench_pictures.cfg file is sane.
- Exec the file to ensure that it uses correct Python syntax.
- Make sure that every element is a string, because the buildbot scripts will
fail to execute if this is not the case.
This test does not verify that the well-formed configs are actually valid.
"""
vars = {'import_path': 'tools'}
execfile(os.path.join('tools', 'bench_pictures.cfg'), vars)
bench_pictures_cfg = vars['bench_pictures_cfg']
for config_name, config_list in bench_pictures_cfg.iteritems():
ThrowIfNotAString(config_name)
for config in config_list:
for key, value in config.iteritems():
ThrowIfNotAString(key)
if type(value).__name__ == 'list':
for item in value:
ThrowIfNotAString(item)
        elif value is not True:
ThrowIfNotAString(value)
if __name__ == '__main__':
sys.exit(Main(sys.argv)) | bsd-3-clause |
bdcht/amoco | amoco/ui/graphics/qt_/graphwin.py | 1 | 13132 | # -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2020 Axel Tillequin ([email protected])
# published under GPLv2 license
from math import sin,cos,pi,pow,radians
from PySide2.QtCore import (Qt,
Signal,
QPointF,
QRectF,
)
from PySide2.QtGui import (QPen,
QColor,
QBrush,
QFont,
QPainterPath,
                           QPolygonF,
                           QPainter,
                           )
from PySide2.QtWidgets import (QGraphicsScene,
QGraphicsView,
QGraphicsItem,
QGraphicsEllipseItem,
QGraphicsTextItem,
QGraphicsDropShadowEffect,
QGraphicsPathItem,
QMenu,
                               QAction,
                               QGraphicsRectItem,
                               )
__all__ = ['createFuncGraph','GraphScene','GraphView',
'Node_basic', 'Node_codeblock', 'Edge_basic']
def createFuncGraph(self, f):
sc = GraphScene(f.view.layout)
w = GraphView(sc)
sc.Draw()
return w
class GraphScene(QGraphicsScene):
def __init__(self, sug=None):
super().__init__()
p = QPen()
p.setColor(QColor("red"))
self.addLine(-5, 0, 5, 0, p)
self.addLine(0, -5, 0, 5, p)
self.sug = sug
if self.sug:
from grandalf.routing import route_with_lines
self.sug.route_edge = route_with_lines
self.sug.dx, self.sug.dy = 5, 5
self.sug.dirvh = 0
for n in self.sug.g.sV:
self.connect_add(n.view)
for e in self.sug.g.sE:
e.view = Edge_basic(e.v[0].view.obj, e.v[1].view.obj)
self.addItem(e.view)
def connect_add(self, nv):
self.addItem(nv.obj)
def Draw(self, N=1, stepflag=False, constrained=False, opt=False):
self.sug.init_all()
if stepflag:
self.drawer = self.sug.draw_step()
self.greens = []
else:
self.sug.draw(N)
for e in self.sug.alt_e:
e.view.set_properties(stroke_color="red")
for e in self.sug.g.sE:
# self.parent.root.add_child(e.view)
# move edge start/end to CX points:
e.view.update_points()
# ------------------------------------------------------------------------------
class GraphView(QGraphicsView):
def __init__(self, scene):
super().__init__(scene)
self.setRenderHints(QPainter.Antialiasing)
self.setBackgroundBrush(QBrush(QColor("#fff")))
# self.setViewportUpdateMode(QGraphicsView.BoundingRectViewportUpdate)
# self.setDragMode(QGraphicsView.ScrollHandDrag)
# self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
# self.setResizeAnchor(QGraphicsView.AnchorViewCenter)
def keyPressEvent(self, event):
key = event.key()
if key == Qt.Key_Plus:
self.scaleView(1.2)
elif key == Qt.Key_Minus:
self.scaleView(1 / 1.2)
else:
super().keyPressEvent(event)
def wheelEvent(self, event):
self.scaleView(pow(2.0, -event.angleDelta().y() / 240.0))
def scaleView(self, scaleFactor):
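        # scale the view, but refuse changes that would push the cumulative zoom
        # outside a sane range (clamped below)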
factor = (
self.transform()
.scale(scaleFactor, scaleFactor)
.mapRect(QRectF(0, 0, 1, 1))
.width()
)
if factor < 0.07 or factor > 100:
return
self.scale(scaleFactor, scaleFactor)
# ------------------------------------------------------------------------------
class Node_basic(QGraphicsItem):
"""Node_basic is a QGraphicsItem that represents a function node, used as
a view for a cfg.node of code.func or code.xfunc object.
The object is movable, focusable and accepts mouse-over events.
It is composed of a shadowed circle of radius *r* colored in white,
and a blue label set as the function's name.
Arguments:
name (string): string used as label for the Node_basic.label.
r (int): radius of the Node_basic.el circle.
Attributes:
        el (QGraphicsEllipseItem): the circle object
label (QGraphicsTextItem): the label object
        cx (list[Edge_basic]): list of edge views associated with the node
"""
def __init__(self, name="?", r=10):
super().__init__()
self.setFlag(QGraphicsItem.ItemIsMovable)
self.setFlag(QGraphicsItem.ItemIsFocusable)
self.setFlag(QGraphicsItem.ItemSendsGeometryChanges)
self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
self.setAcceptHoverEvents(True)
# define circle shape:
w = 2 * r + 2
self.el = QGraphicsEllipseItem(0, 0, w, w)
self.el.setBrush(QBrush(QColor("white")))
shadow = QGraphicsDropShadowEffect()
shadow.setOffset(4)
self.el.setGraphicsEffect(shadow)
self.el.setParentItem(self)
# define node label shape:
self.label = QGraphicsTextItem(name)
self.label.setDefaultTextColor(QColor("blue"))
self.label.setFlag(QGraphicsItem.ItemIsSelectable)
self.label.setParentItem(self)
self.el.setZValue(1.0)
self.label.setZValue(2.0)
center = self.center() - self.label.boundingRect().center()
self.label.setPos(self.mapFromScene(center))
self.setZValue(1.0)
self.cx = []
def boundingRect(self):
e = self.el.boundingRect()
l = self.label.boundingRect()
l = self.mapRectToItem(self, l)
return e.united(l)
def shape(self):
e = self.el.shape()
l = self.label.shape()
l = self.mapToItem(self, l)
return e.united(l)
def paint(self, painter, option, widget=None):
pass
def center(self):
return self.el.sceneBoundingRect().center()
def focusOutEvent(self, event):
self.label.setTextInteractionFlags(Qt.NoTextInteraction)
super(Node_basic, self).focusOutEvent(event)
def mouseDoubleClickEvent(self, event):
if self.label.textInteractionFlags() == Qt.NoTextInteraction:
self.label.setTextInteractionFlags(Qt.TextEditorInteraction)
super(Node_basic, self).mouseDoubleClickEvent(event)
def itemChange(self, change, value):
if change == QGraphicsItem.ItemPositionHasChanged:
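            # re-route every connected edge so it follows the node while it is dragged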
for e in self.cx:
e.update_points()
return super(Node_basic, self).itemChange(change, value)
def contextMenuEvent(self, event):
menu = QMenu()
testAction = QAction("Test", None)
testAction.triggered.connect(self.print_out)
menu.addAction(testAction)
menu.exec_(event.screenPos())
def print_out(self):
print("Triggered")
# ------------------------------------------------------------------------------
class Node_codeblock(QGraphicsItem):
"""Node_codeblock is a QGraphicsItem that represents a block node, used as a
view for a cfg.node of code.block object.
The object is movable, focusable and accepts mouse-over events.
It is composed of a shadowed rectangle (QGraphicsRectItem) that contains
a text block (QGraphicsTextItem) with the assembly instructions formatted
as an Html source for pretty printing.
Arguments:
html (str): the HTML representation of a block of instructions
Attributes:
codebox (QGraphicsRectItem): the shadowed rectangular background
code (QGraphicsTextItem): the assembly text of the input block
        cx (list[Edge_basic]): list of edge views associated with the node
"""
def __init__(self, block):
super().__init__()
self.setFlag(QGraphicsItem.ItemIsMovable)
self.setFlag(QGraphicsItem.ItemIsFocusable)
self.setFlag(QGraphicsItem.ItemSendsGeometryChanges)
self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
self.setAcceptHoverEvents(True)
# define code text shape:
self.code = QGraphicsTextItem()
self.code.setHtml(block)
f = QFont("Monospace")
f.setPointSize(8)
self.code.setFont(f)
self.code.setParentItem(self)
        # define the surrounding code box (rectangle) shape:
self.codebox = QGraphicsRectItem(
self.code.boundingRect().adjusted(-2, -2, 2, 2)
)
self.codebox.setBrush(QBrush(QColor("#fdf6e3")))
shadow = QGraphicsDropShadowEffect()
shadow.setOffset(4)
self.codebox.setGraphicsEffect(shadow)
self.codebox.setParentItem(self)
self.codebox.setZValue(1.0)
self.code.setZValue(2.0)
center = (
self.codebox.boundingRect().center() - self.code.boundingRect().center()
)
self.code.setPos(center)
self.setZValue(1.0)
self.cx = []
def boundingRect(self):
b = self.codebox.boundingRect()
return b
def center(self):
return self.codebox.sceneBoundingRect().center()
def shape(self):
return self.codebox.shape()
def paint(self, painter, option, widget=None):
pass
def hoverEnterEvent(self, event):
self.codebox.setBrush(QBrush(QColor("white")))
super().hoverEnterEvent(event)
def hoverLeaveEvent(self, event):
self.codebox.setBrush(QBrush(QColor("#fdf6e3")))
self.code.setTextInteractionFlags(Qt.NoTextInteraction)
super().hoverLeaveEvent(event)
def mouseDoubleClickEvent(self, event):
if self.code.textInteractionFlags() == Qt.NoTextInteraction:
self.code.setTextInteractionFlags(Qt.TextEditorInteraction)
super().mouseDoubleClickEvent(event)
def itemChange(self, change, value):
if change == QGraphicsItem.ItemPositionHasChanged:
for e in self.cx:
e.update_points()
return super().itemChange(change, value)
# ------------------------------------------------------------------------------
class Edge_basic(QGraphicsItem):
"""Edge_basic is a QGraphicsItem that represents an edge, used as a
view for a cfg.Edge object.
The object is not movable or focusable but should accept mouse
events to highlight or tag the nodes of this edge.
    It is composed of a QGraphicsPathItem built from self.points
and a triangular arrow head positioned at the border of the node's
view. It should react to nodes n0/n1 displacements.
Arguments:
n0 (Node_codeblock|Node_basic): first node (from).
n1 (Node_codeblock|Node_basic): second node (to).
Attributes:
n (list): the list of node views.
points (list[QPointF]): list of points for routing the edge.
head (QPolygonF): the arrow head polygon.
"""
def __init__(self, n0, n1):
super().__init__()
self.setFlag(QGraphicsItem.ItemSendsGeometryChanges)
self.n = [n0, n1]
n0.cx.append(self)
n1.cx.append(self)
self.points = [None, None]
self.head = None
self.update_points()
def setpath(self, l):
self.points = [QPointF(*p) for p in l]
self.update_points()
def update_points(self):
self.prepareGeometryChange()
self.points[0] = self.n[0].center()
self.points[-1] = self.n[1].center()
self.adjust()
def adjust(self):
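        # Clip the edge to the destination node: intersect the node's outline
        # (in scene coordinates) with the edge path and move the last routing
        # point there, so the arrow head ends at the shape border.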
nend = self.n[1]
nendshape = nend.shape()
s = nend.mapToScene(nendshape)
x = s.intersected(self.shape())
self.points[-1] = x.pointAtPercent(1.0)
def boundingRect(self):
br = self.getqgp().boundingRect()
if self.head:
br = br.united(self.head.boundingRect())
return br
def getqgp(self):
"""Compute the QGraphicsPathItem that represents the open
polygonal line going through all self.points.
"""
qpp = QPainterPath(self.points[0])
for p in self.points[1:]:
qpp.lineTo(p)
return QGraphicsPathItem(qpp)
def shape(self):
s = self.getqgp().shape()
if self.head:
s.addPolygon(self.head)
return s
def paint(self, painter, option, widget=None):
qgp = self.getqgp()
pen = QPen()
pen.setWidth(2)
qgp.setPen(pen)
qgp.setBrush(QBrush(Qt.NoBrush))
painter.setClipRect(option.exposedRect)
qgp.paint(painter, option, widget)
lastp = self.points[-1]
angle = radians(qgp.path().angleAtPercent(1.0))
angle = angle + pi
p = lastp + QPointF(cos(angle - pi / 6.0) * 10, -sin(angle - pi / 6.0) * 10)
q = lastp + QPointF(cos(angle + pi / 6.0) * 10, -sin(angle + pi / 6.0) * 10)
painter.setBrush(QBrush(QColor("black")))
self.head = QPolygonF([lastp, p, q])
painter.drawPolygon(self.head)
| gpl-2.0 |
Lapotor/libretime | dev_tools/compare_cc_files_to_fs.py | 10 | 5060 | import os
import time
import shutil
import sys
import logging
from configobj import ConfigObj
from subprocess import Popen, PIPE
from api_clients import api_client as apc
"""
This script compares what the database has to what your filesystem has and reports any differences.
It will *NOT* make any changes, unlike media-monitor, which uses similar code when it starts up
(but then makes changes if something is different).
"""
class AirtimeMediaMonitorBootstrap():
"""AirtimeMediaMonitorBootstrap constructor
Keyword Arguments:
logger -- reference to the media-monitor logging facility
pe -- reference to an instance of ProcessEvent
api_clients -- reference of api_clients to communicate with airtime-server
"""
def __init__(self):
config = ConfigObj('/etc/airtime/airtime.conf')
self.api_client = apc.api_client_factory(config)
"""
try:
logging.config.fileConfig("logging.cfg")
except Exception, e:
print 'Error configuring logging: ', e
sys.exit(1)
"""
self.logger = logging.getLogger()
self.logger.info("Adding %s on watch list...", "xxx")
self.scan()
"""On bootup we want to scan all directories and look for files that
weren't there or files that changed before media-monitor process
went offline.
"""
def scan(self):
directories = self.get_list_of_watched_dirs();
self.logger.info("watched directories found: %s", directories)
for id, dir in directories.iteritems():
self.logger.debug("%s, %s", id, dir)
#CHANGED!!!
#self.sync_database_to_filesystem(id, api_client.encode_to(dir, "utf-8"))
self.sync_database_to_filesystem(id, dir)
"""Gets a list of files that the Airtime database knows for a specific directory.
You need to provide the directory's row ID, which is obtained when calling
get_list_of_watched_dirs function.
dir_id -- row id of the directory in the cc_watched_dirs database table
"""
def list_db_files(self, dir_id):
return self.api_client.list_all_db_files(dir_id)
"""
returns the path and the database row id for this path for all watched directories. Also
returns the Stor directory, which can be identified by its row id (always has value of "1")
"""
def get_list_of_watched_dirs(self):
json = self.api_client.list_all_watched_dirs()
return json["dirs"]
def scan_dir_for_existing_files(self, dir):
        # group the -iname tests so that -type f and -readable apply to both extensions
        command = 'find "%s" -type f \\( -iname "*.ogg" -o -iname "*.mp3" \\) -readable' % dir.replace('"', '\\"')
self.logger.debug(command)
#CHANGED!!
stdout = self.exec_command(command).decode("UTF-8")
return stdout.splitlines()
def exec_command(self, command):
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
self.logger.warn("command \n%s\n return with a non-zero return value", command)
self.logger.error(stderr)
return stdout
"""
This function takes in a path name provided by the database (and its corresponding row id)
and reads the list of files in the local file system. Its purpose is to discover which files
exist on the file system but not in the database and vice versa, as well as which files have
been modified since the database was last updated. In each case, this method will call an
    appropriate method to ensure that the database actually represents the filesystem.
dir_id -- row id of the directory in the cc_watched_dirs database table
dir -- pathname of the directory
"""
def sync_database_to_filesystem(self, dir_id, dir):
"""
set to hold new and/or modified files. We use a set to make it ok if files are added
twice. This is because some of the tests for new files return result sets that are not
mutually exclusive from each other.
"""
db_known_files_set = set()
files = self.list_db_files(dir_id)
for file in files['files']:
db_known_files_set.add(file)
existing_files = self.scan_dir_for_existing_files(dir)
existing_files_set = set()
for file_path in existing_files:
if len(file_path.strip(" \n")) > 0:
existing_files_set.add(file_path[len(dir):])
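        # entries only in the DB were deleted from disk; entries only on disk are new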
deleted_files_set = db_known_files_set - existing_files_set
new_files_set = existing_files_set - db_known_files_set
print ("DB Known files: \n%s\n\n"%len(db_known_files_set))
print ("FS Known files: \n%s\n\n"%len(existing_files_set))
print ("Deleted files: \n%s\n\n"%deleted_files_set)
print ("New files: \n%s\n\n"%new_files_set)
if __name__ == "__main__":
AirtimeMediaMonitorBootstrap()
| agpl-3.0 |
septicmk/MEHI | test/test_p_fusion.py | 1 | 1315 | ################################
# Author : septicmk
# Date : 2015/07/24 20:08:44
# FileName : test_p_fusion.py
################################
from MEHI.paralleled.fusion import *
from MEHI.paralleled.IO import load_tiff
from test_utils import PySparkTestCase
import numpy as np
import os
L_pwd = os.path.abspath('.') + '/test_data/L_side/'
R_pwd = os.path.abspath('.') + '/test_data/R_side/'
class PySparkTestFusionCase(PySparkTestCase):
def setUp(self):
super(PySparkTestFusionCase, self).setUp()
self.L_imgs = load_tiff(self.sc, L_pwd)
self.R_imgs = load_tiff(self.sc, R_pwd)
def tearDown(self):
super(PySparkTestFusionCase, self).tearDown()
class TestParalleledFusion(PySparkTestFusionCase):
def test_content_fusion(self):
img_stack = zip(self.L_imgs, self.R_imgs)
rdd = self.sc.parallelize(img_stack)
fused_img = content_fusion(rdd)
assert (fused_img.dtype == self.L_imgs.dtype)
assert (fused_img.shape == self.L_imgs.shape)
def test_wavelet_fusion(self):
img_stack = zip(self.L_imgs, self.R_imgs)
rdd = self.sc.parallelize(img_stack)
fused_img = wavelet_fusion(rdd)
assert (fused_img.dtype == self.L_imgs.dtype)
assert (fused_img.shape == self.L_imgs.shape)
| bsd-3-clause |
Therp/odoo | addons/project/wizard/__init__.py | 381 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_task_delegate
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
profjrr/scrapy | tests/test_settings/__init__.py | 98 | 12165 | import six
import unittest
import warnings
from scrapy.settings import Settings, SettingsAttribute, CrawlerSettings
from tests import mock
from . import default_settings
class SettingsAttributeTest(unittest.TestCase):
def setUp(self):
self.attribute = SettingsAttribute('value', 10)
def test_set_greater_priority(self):
self.attribute.set('value2', 20)
self.assertEqual(self.attribute.value, 'value2')
self.assertEqual(self.attribute.priority, 20)
def test_set_equal_priority(self):
self.attribute.set('value2', 10)
self.assertEqual(self.attribute.value, 'value2')
self.assertEqual(self.attribute.priority, 10)
def test_set_less_priority(self):
self.attribute.set('value2', 0)
self.assertEqual(self.attribute.value, 'value')
self.assertEqual(self.attribute.priority, 10)
class SettingsTest(unittest.TestCase):
if six.PY3:
assertItemsEqual = unittest.TestCase.assertCountEqual
def setUp(self):
self.settings = Settings()
@mock.patch.dict('scrapy.settings.SETTINGS_PRIORITIES', {'default': 10})
@mock.patch('scrapy.settings.default_settings', default_settings)
def test_initial_defaults(self):
settings = Settings()
self.assertEqual(len(settings.attributes), 1)
self.assertIn('TEST_DEFAULT', settings.attributes)
attr = settings.attributes['TEST_DEFAULT']
self.assertIsInstance(attr, SettingsAttribute)
self.assertEqual(attr.value, 'defvalue')
self.assertEqual(attr.priority, 10)
@mock.patch.dict('scrapy.settings.SETTINGS_PRIORITIES', {})
@mock.patch('scrapy.settings.default_settings', {})
def test_initial_values(self):
settings = Settings({'TEST_OPTION': 'value'}, 10)
self.assertEqual(len(settings.attributes), 1)
self.assertIn('TEST_OPTION', settings.attributes)
attr = settings.attributes['TEST_OPTION']
self.assertIsInstance(attr, SettingsAttribute)
self.assertEqual(attr.value, 'value')
self.assertEqual(attr.priority, 10)
def test_set_new_attribute(self):
self.settings.attributes = {}
self.settings.set('TEST_OPTION', 'value', 0)
self.assertIn('TEST_OPTION', self.settings.attributes)
attr = self.settings.attributes['TEST_OPTION']
self.assertIsInstance(attr, SettingsAttribute)
self.assertEqual(attr.value, 'value')
self.assertEqual(attr.priority, 0)
def test_set_instance_identity_on_update(self):
attr = SettingsAttribute('value', 0)
self.settings.attributes = {'TEST_OPTION': attr}
self.settings.set('TEST_OPTION', 'othervalue', 10)
self.assertIn('TEST_OPTION', self.settings.attributes)
self.assertIs(attr, self.settings.attributes['TEST_OPTION'])
def test_set_calls_settings_attributes_methods_on_update(self):
with mock.patch.object(SettingsAttribute, '__setattr__') as mock_setattr, \
mock.patch.object(SettingsAttribute, 'set') as mock_set:
attr = SettingsAttribute('value', 10)
self.settings.attributes = {'TEST_OPTION': attr}
mock_set.reset_mock()
mock_setattr.reset_mock()
for priority in (0, 10, 20):
self.settings.set('TEST_OPTION', 'othervalue', priority)
mock_set.assert_called_once_with('othervalue', priority)
self.assertFalse(mock_setattr.called)
mock_set.reset_mock()
mock_setattr.reset_mock()
def test_setdict_alias(self):
with mock.patch.object(self.settings, 'set') as mock_set:
self.settings.setdict({'TEST_1': 'value1', 'TEST_2': 'value2'}, 10)
self.assertEqual(mock_set.call_count, 2)
calls = [mock.call('TEST_1', 'value1', 10),
mock.call('TEST_2', 'value2', 10)]
mock_set.assert_has_calls(calls, any_order=True)
def test_setmodule_only_load_uppercase_vars(self):
class ModuleMock():
UPPERCASE_VAR = 'value'
MIXEDcase_VAR = 'othervalue'
lowercase_var = 'anothervalue'
self.settings.attributes = {}
self.settings.setmodule(ModuleMock(), 10)
self.assertIn('UPPERCASE_VAR', self.settings.attributes)
self.assertNotIn('MIXEDcase_VAR', self.settings.attributes)
self.assertNotIn('lowercase_var', self.settings.attributes)
self.assertEqual(len(self.settings.attributes), 1)
def test_setmodule_alias(self):
with mock.patch.object(self.settings, 'set') as mock_set:
self.settings.setmodule(default_settings, 10)
mock_set.assert_called_with('TEST_DEFAULT', 'defvalue', 10)
def test_setmodule_by_path(self):
self.settings.attributes = {}
self.settings.setmodule(default_settings, 10)
ctrl_attributes = self.settings.attributes.copy()
self.settings.attributes = {}
self.settings.setmodule(
'tests.test_settings.default_settings', 10)
self.assertItemsEqual(six.iterkeys(self.settings.attributes),
six.iterkeys(ctrl_attributes))
for attr, ctrl_attr in zip(six.itervalues(self.settings.attributes),
six.itervalues(ctrl_attributes)):
self.assertEqual(attr.value, ctrl_attr.value)
self.assertEqual(attr.priority, ctrl_attr.priority)
def test_get(self):
test_configuration = {
'TEST_ENABLED1': '1',
'TEST_ENABLED2': True,
'TEST_ENABLED3': 1,
'TEST_DISABLED1': '0',
'TEST_DISABLED2': False,
'TEST_DISABLED3': 0,
'TEST_INT1': 123,
'TEST_INT2': '123',
'TEST_FLOAT1': 123.45,
'TEST_FLOAT2': '123.45',
'TEST_LIST1': ['one', 'two'],
'TEST_LIST2': 'one,two',
'TEST_STR': 'value',
'TEST_DICT1': {'key1': 'val1', 'ke2': 3},
'TEST_DICT2': '{"key1": "val1", "ke2": 3}',
}
settings = self.settings
settings.attributes = {key: SettingsAttribute(value, 0) for key, value
in six.iteritems(test_configuration)}
self.assertTrue(settings.getbool('TEST_ENABLED1'))
self.assertTrue(settings.getbool('TEST_ENABLED2'))
self.assertTrue(settings.getbool('TEST_ENABLED3'))
self.assertFalse(settings.getbool('TEST_ENABLEDx'))
self.assertTrue(settings.getbool('TEST_ENABLEDx', True))
self.assertFalse(settings.getbool('TEST_DISABLED1'))
self.assertFalse(settings.getbool('TEST_DISABLED2'))
self.assertFalse(settings.getbool('TEST_DISABLED3'))
self.assertEqual(settings.getint('TEST_INT1'), 123)
self.assertEqual(settings.getint('TEST_INT2'), 123)
self.assertEqual(settings.getint('TEST_INTx'), 0)
self.assertEqual(settings.getint('TEST_INTx', 45), 45)
self.assertEqual(settings.getfloat('TEST_FLOAT1'), 123.45)
self.assertEqual(settings.getfloat('TEST_FLOAT2'), 123.45)
self.assertEqual(settings.getfloat('TEST_FLOATx'), 0.0)
self.assertEqual(settings.getfloat('TEST_FLOATx', 55.0), 55.0)
self.assertEqual(settings.getlist('TEST_LIST1'), ['one', 'two'])
self.assertEqual(settings.getlist('TEST_LIST2'), ['one', 'two'])
self.assertEqual(settings.getlist('TEST_LISTx'), [])
self.assertEqual(settings.getlist('TEST_LISTx', ['default']), ['default'])
self.assertEqual(settings['TEST_STR'], 'value')
self.assertEqual(settings.get('TEST_STR'), 'value')
self.assertEqual(settings['TEST_STRx'], None)
self.assertEqual(settings.get('TEST_STRx'), None)
self.assertEqual(settings.get('TEST_STRx', 'default'), 'default')
self.assertEqual(settings.getdict('TEST_DICT1'), {'key1': 'val1', 'ke2': 3})
self.assertEqual(settings.getdict('TEST_DICT2'), {'key1': 'val1', 'ke2': 3})
self.assertEqual(settings.getdict('TEST_DICT3'), {})
self.assertEqual(settings.getdict('TEST_DICT3', {'key1': 5}), {'key1': 5})
self.assertRaises(ValueError, settings.getdict, 'TEST_LIST1')
def test_copy(self):
values = {
'TEST_BOOL': True,
'TEST_LIST': ['one', 'two'],
'TEST_LIST_OF_LISTS': [['first_one', 'first_two'],
['second_one', 'second_two']]
}
self.settings.setdict(values)
copy = self.settings.copy()
self.settings.set('TEST_BOOL', False)
self.assertTrue(copy.get('TEST_BOOL'))
test_list = self.settings.get('TEST_LIST')
test_list.append('three')
self.assertListEqual(copy.get('TEST_LIST'), ['one', 'two'])
test_list_of_lists = self.settings.get('TEST_LIST_OF_LISTS')
test_list_of_lists[0].append('first_three')
self.assertListEqual(copy.get('TEST_LIST_OF_LISTS')[0],
['first_one', 'first_two'])
def test_freeze(self):
self.settings.freeze()
with self.assertRaises(TypeError) as cm:
self.settings.set('TEST_BOOL', False)
self.assertEqual(str(cm.exception),
"Trying to modify an immutable Settings object")
def test_frozencopy(self):
frozencopy = self.settings.frozencopy()
self.assertTrue(frozencopy.frozen)
self.assertIsNot(frozencopy, self.settings)
def test_deprecated_attribute_overrides(self):
self.settings.set('BAR', 'fuz', priority='cmdline')
with warnings.catch_warnings(record=True) as w:
self.settings.overrides['BAR'] = 'foo'
self.assertIn("Settings.overrides", str(w[0].message))
self.assertEqual(self.settings.get('BAR'), 'foo')
self.assertEqual(self.settings.overrides.get('BAR'), 'foo')
self.assertIn('BAR', self.settings.overrides)
self.settings.overrides.update(BAR='bus')
self.assertEqual(self.settings.get('BAR'), 'bus')
self.assertEqual(self.settings.overrides.get('BAR'), 'bus')
self.settings.overrides.setdefault('BAR', 'fez')
self.assertEqual(self.settings.get('BAR'), 'bus')
self.settings.overrides.setdefault('FOO', 'fez')
self.assertEqual(self.settings.get('FOO'), 'fez')
self.assertEqual(self.settings.overrides.get('FOO'), 'fez')
def test_deprecated_attribute_defaults(self):
self.settings.set('BAR', 'fuz', priority='default')
with warnings.catch_warnings(record=True) as w:
self.settings.defaults['BAR'] = 'foo'
self.assertIn("Settings.defaults", str(w[0].message))
self.assertEqual(self.settings.get('BAR'), 'foo')
self.assertEqual(self.settings.defaults.get('BAR'), 'foo')
self.assertIn('BAR', self.settings.defaults)
class CrawlerSettingsTest(unittest.TestCase):
def test_deprecated_crawlersettings(self):
def _get_settings(settings_dict=None):
settings_module = type('SettingsModuleMock', (object,), settings_dict or {})
return CrawlerSettings(settings_module)
with warnings.catch_warnings(record=True) as w:
settings = _get_settings()
self.assertIn("CrawlerSettings is deprecated", str(w[0].message))
# test_global_defaults
self.assertEqual(settings.getint('DOWNLOAD_TIMEOUT'), 180)
# test_defaults
settings.defaults['DOWNLOAD_TIMEOUT'] = '99'
self.assertEqual(settings.getint('DOWNLOAD_TIMEOUT'), 99)
# test_settings_module
settings = _get_settings({'DOWNLOAD_TIMEOUT': '3'})
self.assertEqual(settings.getint('DOWNLOAD_TIMEOUT'), 3)
# test_overrides
settings = _get_settings({'DOWNLOAD_TIMEOUT': '3'})
settings.overrides['DOWNLOAD_TIMEOUT'] = '15'
self.assertEqual(settings.getint('DOWNLOAD_TIMEOUT'), 15)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
lahosken/pants | contrib/node/src/python/pants/contrib/node/targets/node_package.py | 18 | 1308 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.build_graph.target import Target
class NodePackage(Target):
"""Represents an abstract Node package, currently handled by NPM internally.
All Node packages have a package name whether they are local or remote so this serves as a base
class for all concrete manifestations of packages.
"""
def __init__(self, package_name=None, address=None, payload=None, **kwargs):
"""
    :param string package_name: The remote module package name; if not supplied, the target
      name is used.
"""
payload = payload or Payload()
payload.add_fields({
'package_name': PrimitiveField(package_name or address.target_name),
})
super(NodePackage, self).__init__(address=address, payload=payload, **kwargs)
@property
def package_name(self):
"""The name of the remote module package.
:rtype: string
"""
return self.payload.package_name
| apache-2.0 |
mdavid/zulip | analytics/management/commands/realm_stats.py | 113 | 7859 | from __future__ import absolute_import
import datetime
import pytz
from django.core.management.base import BaseCommand
from django.db.models import Count
from zerver.models import UserProfile, Realm, Stream, Message, Recipient, UserActivity, \
Subscription, UserMessage
MOBILE_CLIENT_LIST = ["Android", "ios"]
HUMAN_CLIENT_LIST = MOBILE_CLIENT_LIST + ["website"]
human_messages = Message.objects.filter(sending_client__name__in=HUMAN_CLIENT_LIST)
class Command(BaseCommand):
help = "Generate statistics on realm activity."
def add_arguments(self, parser):
parser.add_argument('realms', metavar='<realm>', type=str, nargs='*',
help="realm to generate statistics for")
def active_users(self, realm):
# Has been active (on the website, for now) in the last 7 days.
activity_cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(days=7)
return [activity.user_profile for activity in \
UserActivity.objects.filter(user_profile__realm=realm,
user_profile__is_active=True,
last_visit__gt=activity_cutoff,
query="/json/update_pointer",
client__name="website")]
def messages_sent_by(self, user, days_ago):
sent_time_cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(days=days_ago)
return human_messages.filter(sender=user, pub_date__gt=sent_time_cutoff).count()
def total_messages(self, realm, days_ago):
sent_time_cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(days=days_ago)
return Message.objects.filter(sender__realm=realm, pub_date__gt=sent_time_cutoff).count()
def human_messages(self, realm, days_ago):
sent_time_cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(days=days_ago)
return human_messages.filter(sender__realm=realm, pub_date__gt=sent_time_cutoff).count()
def api_messages(self, realm, days_ago):
return (self.total_messages(realm, days_ago) - self.human_messages(realm, days_ago))
def stream_messages(self, realm, days_ago):
sent_time_cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(days=days_ago)
return human_messages.filter(sender__realm=realm, pub_date__gt=sent_time_cutoff,
recipient__type=Recipient.STREAM).count()
def private_messages(self, realm, days_ago):
sent_time_cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(days=days_ago)
return human_messages.filter(sender__realm=realm, pub_date__gt=sent_time_cutoff).exclude(
recipient__type=Recipient.STREAM).exclude(recipient__type=Recipient.HUDDLE).count()
def group_private_messages(self, realm, days_ago):
sent_time_cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(days=days_ago)
return human_messages.filter(sender__realm=realm, pub_date__gt=sent_time_cutoff).exclude(
recipient__type=Recipient.STREAM).exclude(recipient__type=Recipient.PERSONAL).count()
def report_percentage(self, numerator, denominator, text):
if not denominator:
fraction = 0.0
else:
fraction = numerator / float(denominator)
print "%.2f%% of" % (fraction * 100,), text
def handle(self, *args, **options):
if options['realms']:
try:
realms = [Realm.objects.get(domain=domain) for domain in options['realms']]
except Realm.DoesNotExist, e:
print e
exit(1)
else:
realms = Realm.objects.all()
for realm in realms:
print realm.domain
user_profiles = UserProfile.objects.filter(realm=realm, is_active=True)
active_users = self.active_users(realm)
num_active = len(active_users)
print "%d active users (%d total)" % (num_active, len(user_profiles))
streams = Stream.objects.filter(realm=realm).extra(
tables=['zerver_subscription', 'zerver_recipient'],
where=['zerver_subscription.recipient_id = zerver_recipient.id',
'zerver_recipient.type = 2',
'zerver_recipient.type_id = zerver_stream.id',
'zerver_subscription.active = true']).annotate(count=Count("name"))
print "%d streams" % (streams.count(),)
for days_ago in (1, 7, 30):
print "In last %d days, users sent:" % (days_ago,)
sender_quantities = [self.messages_sent_by(user, days_ago) for user in user_profiles]
for quantity in sorted(sender_quantities, reverse=True):
print quantity,
print ""
print "%d stream messages" % (self.stream_messages(realm, days_ago),)
print "%d one-on-one private messages" % (self.private_messages(realm, days_ago),)
print "%d messages sent via the API" % (self.api_messages(realm, days_ago),)
print "%d group private messages" % (self.group_private_messages(realm, days_ago),)
num_notifications_enabled = len(filter(lambda x: x.enable_desktop_notifications == True,
active_users))
self.report_percentage(num_notifications_enabled, num_active,
"active users have desktop notifications enabled")
num_enter_sends = len(filter(lambda x: x.enter_sends, active_users))
self.report_percentage(num_enter_sends, num_active,
"active users have enter-sends")
all_message_count = human_messages.filter(sender__realm=realm).count()
multi_paragraph_message_count = human_messages.filter(
sender__realm=realm, content__contains="\n\n").count()
self.report_percentage(multi_paragraph_message_count, all_message_count,
"all messages are multi-paragraph")
# Starred messages
starrers = UserMessage.objects.filter(user_profile__in=user_profiles,
flags=UserMessage.flags.starred).values(
"user_profile").annotate(count=Count("user_profile"))
print "%d users have starred %d messages" % (
len(starrers), sum([elt["count"] for elt in starrers]))
active_user_subs = Subscription.objects.filter(
user_profile__in=user_profiles, active=True)
# Streams not in home view
non_home_view = active_user_subs.filter(in_home_view=False).values(
"user_profile").annotate(count=Count("user_profile"))
print "%d users have %d streams not in home view" % (
len(non_home_view), sum([elt["count"] for elt in non_home_view]))
# Code block markup
markup_messages = human_messages.filter(
sender__realm=realm, content__contains="~~~").values(
"sender").annotate(count=Count("sender"))
print "%d users have used code block markup on %s messages" % (
len(markup_messages), sum([elt["count"] for elt in markup_messages]))
# Notifications for stream messages
notifications = active_user_subs.filter(notifications=True).values(
"user_profile").annotate(count=Count("user_profile"))
print "%d users receive desktop notifications for %d streams" % (
len(notifications), sum([elt["count"] for elt in notifications]))
print ""
| apache-2.0 |
sajeeshcs/nested_quota_latest | nova/virt/libvirt/host.py | 2 | 15703 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
import os
import socket
import threading
import eventlet
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import event as virtevent
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = __import__('libvirt')
self._uri = uri
self._read_only = read_only
self._conn_event_handler = conn_event_handler
self._lifecycle_event_handler = lifecycle_event_handler
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a Xen domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
if uri.find("xen://") != -1:
self._lifecycle_delay = 15
else:
self._lifecycle_delay = 0
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
@staticmethod
def _connect(uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_delayed_cleanup(event)
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
if self._conn_event_handler is not None:
self._conn_event_handler(False, msg)
def _event_delayed_cleanup(self, event):
"""Cleanup possible delayed stop events."""
if (event.transition == virtevent.EVENT_LIFECYCLE_STARTED or
event.transition == virtevent.EVENT_LIFECYCLE_RESUMED):
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when a event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
if self._lifecycle_delay > 0:
if event.uuid not in self._events_delayed.keys():
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
self._event_thread.setDaemon(True)
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
eventlet.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
wrapped_conn = None
try:
wrapped_conn = self._connect(self._uri, self._read_only)
finally:
# Enabling the compute service, in case it was disabled
# since the connection was successful.
disable_reason = None
if not wrapped_conn:
disable_reason = 'Failed to connect to libvirt'
if self._conn_event_handler is not None:
self._conn_event_handler(bool(wrapped_conn), disable_reason)
self._wrapped_conn = wrapped_conn
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
# NOTE: The registerCloseCallback of python-libvirt 1.0.1+
# is defined with 3 arguments, and the above registerClose-
# Callback succeeds. However, the one of python-libvirt 1.0.0
# is defined with 4 arguments and TypeError happens here.
# Then python-libvirt 0.9 does not define a method register-
# CloseCallback.
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
LOG.warn(_LW("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self._uri, 'error': e})
return wrapped_conn
def get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
wrapped_conn = self._get_new_connection()
return wrapped_conn
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
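        # Check the connected libvirt/hypervisor against the given minimum
        # versions (and optional hypervisor type); any failure means False.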
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if libvirt_version < utils.convert_version_to_int(lv_ver):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if hypervisor_version < utils.convert_version_to_int(hv_ver):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
| apache-2.0 |
mikanbako/ri_ar | ri_advertiser/test/test_ri_advertiser.py | 1 | 1126 | # Copyright (c) 2016 Keita Kita
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
import unittest
from unittest.mock import MagicMock
from ri_advertiser.ri_advertiser import Gpio, Advertiser, RiAdvertiser
class RiAdvertiserTest(unittest.TestCase):
def setUp(self):
self.__gpio = Gpio()
self.__gpio.initialize()
self.__advertiser = Advertiser()
self.__ri_advertiser = RiAdvertiser(self.__gpio, self.__advertiser)
def tearDown(self):
self.__gpio.release()
def test_wait_for_input(self):
self.__gpio.wait_for_input = MagicMock()
self.__ri_advertiser.wait_for_input()
self.__gpio.wait_for_input.assert_called_once_with()
def test_start_advertise(self):
self.__advertiser.start = MagicMock()
self.__ri_advertiser.start_advertise()
self.__advertiser.start.assert_called_once_with()
def test_stop_advertise(self):
self.__advertiser.stop = MagicMock()
self.__ri_advertiser.stop_advertise()
self.__advertiser.stop.assert_called_once_with()
| mit |
lhfei/spark-in-action | spark-3.x/src/main/python/ml/pca_example.py | 27 | 1510 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.ml.feature import PCA
from pyspark.ml.linalg import Vectors
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("PCAExample")\
.getOrCreate()
# $example on$
data = [(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),),
(Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),),
(Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),)]
df = spark.createDataFrame(data, ["features"])
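    # project the 5-dimensional feature vectors onto their top 3 principal components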
pca = PCA(k=3, inputCol="features", outputCol="pcaFeatures")
model = pca.fit(df)
result = model.transform(df).select("pcaFeatures")
result.show(truncate=False)
# $example off$
spark.stop()
| apache-2.0 |
Meriipu/quodlibet | quodlibet/ext/songsmenu/splitting.py | 4 | 2189 | # Copyright 2005 Joe Wreschnig
# 2016 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from quodlibet import _
from quodlibet.plugins.songshelpers import any_song
from quodlibet.plugins.songsmenu import SongsMenuPlugin
from quodlibet.util.string.splitters import split_title, split_album
from quodlibet.qltk import Icons
def has_album_splittable(song):
return ("album" in song and
"discnumber" not in song and
song.can_change("album") and
song.can_change("discnumber"))
def has_title_splittable(song):
return ("title" in song and
song.can_change("title") and
song.can_change("version"))
class SplitTags(SongsMenuPlugin):
PLUGIN_ID = "Split Tags"
PLUGIN_NAME = _("Split Tags")
PLUGIN_DESC = _("Splits the disc number from the album and the version "
"from the title at the same time.")
PLUGIN_ICON = Icons.EDIT_FIND_REPLACE
plugin_handles = any_song(has_title_splittable)
def plugin_song(self, song):
if has_title_splittable(song):
title, versions = split_title(song["title"])
if title:
song["title"] = title
if versions:
song["version"] = "\n".join(versions)
if has_album_splittable(song):
album, disc = split_album(song["album"])
if album:
song["album"] = album
if disc:
song["discnumber"] = disc
class SplitAlbum(SongsMenuPlugin):
PLUGIN_ID = "Split Album"
PLUGIN_NAME = _("Split Album")
PLUGIN_DESC = _("Split out disc number.")
PLUGIN_ICON = Icons.EDIT_FIND_REPLACE
plugin_handles = any_song(has_album_splittable)
def plugin_song(self, song):
if has_album_splittable(song):
album, disc = split_album(song["album"])
if album:
song["album"] = album
if disc:
song["discnumber"] = disc
| gpl-2.0 |
wileeam/airflow | airflow/migrations/versions/bba5a7cfc896_add_a_column_to_track_the_encryption_.py | 5 | 1350 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add a column to track the encryption state of the 'Extra' field in connection
Revision ID: bba5a7cfc896
Revises: bbc73705a13e
Create Date: 2016-01-29 15:10:32.656425
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'bba5a7cfc896'
down_revision = 'bbc73705a13e'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('connection',
sa.Column('is_extra_encrypted', sa.Boolean, default=False))
def downgrade():
op.drop_column('connection', 'is_extra_encrypted')
| apache-2.0 |
andyzsf/edx | common/test/acceptance/pages/studio/video/video.py | 24 | 22244 | """
CMS Video
"""
import time
import os
import requests
from bok_choy.promise import EmptyPromise, Promise
from bok_choy.javascript import wait_for_js, js_defined
from ....tests.helpers import YouTubeStubConfig
from ...lms.video.video import VideoPage
from selenium.webdriver.common.keys import Keys
from ..utils import wait_for_notification
CLASS_SELECTORS = {
'video_container': 'div.video',
'video_init': '.is-initialized',
'video_xmodule': '.xmodule_VideoModule',
'video_spinner': '.video-wrapper .spinner',
'video_controls': 'section.video-controls',
'attach_asset': '.upload-dialog > input[type="file"]',
'upload_dialog': '.wrapper-modal-window-assetupload',
'xblock': '.add-xblock-component',
'slider_range': '.slider-range',
'error': '.transcripts-error-message',
'url_inputs': '.videolist-settings-item input.input',
'collapse_bar': '.videolist-extra-videos',
'status': '.transcripts-message-status',
'attach_transcript': '.file-chooser > input[type="file"]',
}
BUTTON_SELECTORS = {
'create_video': 'a[data-category="video"]',
'handout_download': '.video-handout.video-download-button a',
'handout_download_editor': '.wrapper-comp-setting.file-uploader .download-action',
'upload_asset': '.upload-action',
'asset_submit': '.action-upload',
'handout_clear': '.wrapper-comp-setting.file-uploader .setting-clear',
'translations_clear': '.metadata-video-translations .setting-clear',
'translation_add': '.wrapper-translations-settings > a',
'import': '.setting-import',
'download_to_edit': '.setting-download',
'disabled_download_to_edit': '.setting-download.is-disabled',
'upload_new_timed_transcripts': '.setting-upload',
'replace': '.setting-replace',
'choose': '.setting-choose',
'use_existing': '.setting-use-existing',
'collapse_link': '.collapse-action.collapse-setting',
}
DISPLAY_NAME = "Component Display Name"
DEFAULT_SETTINGS = [
# basic
[DISPLAY_NAME, 'Video', False],
['Default Video URL', 'http://youtu.be/OEoXaMPEzfM, , ', False],
# advanced
[DISPLAY_NAME, 'Video', False],
['Default Timed Transcript', '', False],
['Download Transcript Allowed', 'False', False],
['Downloadable Transcript URL', '', False],
['EdX Video ID', '', False],
['Show Transcript', 'True', False],
['Transcript Languages', '', False],
['Upload Handout', '', False],
['Video Download Allowed', 'False', False],
['Video File URLs', '', False],
['Video Start Time', '00:00:00', False],
['Video Stop Time', '00:00:00', False],
['YouTube ID', 'OEoXaMPEzfM', False],
['YouTube ID for .75x speed', '', False],
['YouTube ID for 1.25x speed', '', False],
['YouTube ID for 1.5x speed', '', False]
]
# We should wait 300 ms for event handler invocation + 200ms for safety.
DELAY = 0.5
@js_defined('window.Video', 'window.RequireJS.require', 'window.jQuery', 'window.XModule', 'window.XBlock',
'window.MathJax.isReady')
class VideoComponentPage(VideoPage):
"""
CMS Video Component Page
"""
url = None
@wait_for_js
def is_browser_on_page(self):
return self.q(css='div{0}'.format(CLASS_SELECTORS['video_xmodule'])).present or self.q(
css='div{0}'.format(CLASS_SELECTORS['xblock'])).present
def get_element_selector(self, class_name, vertical=False):
return super(VideoComponentPage, self).get_element_selector(class_name, vertical=vertical)
def _wait_for(self, check_func, desc, result=False, timeout=30):
"""
        Calls the method provided as an argument until the Promise is satisfied or a BrokenPromise is raised
Arguments:
check_func (callable): Promise function to be fulfilled.
desc (str): Description of the Promise, used in log messages.
result (bool): Indicates whether we need result from Promise or not
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out.
"""
if result:
return Promise(check_func, desc, timeout=timeout).fulfill()
else:
return EmptyPromise(check_func, desc, timeout=timeout).fulfill()
def wait_for_video_component_render(self):
"""
        Wait until the video component has rendered completely
"""
if not YouTubeStubConfig.get_configuration().get('youtube_api_blocked'):
self._wait_for(lambda: self.q(css=CLASS_SELECTORS['video_init']).present, 'Video Player Initialized')
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['video_spinner']).visible,
'Video Buffering Completed')
self._wait_for(self.is_controls_visible, 'Player Controls are Visible')
@wait_for_js
def is_controls_visible(self):
"""
        Get current visibility state of all video controls.
Returns:
bool: True means video controls are visible for all videos, False means video controls are not visible
for one or more videos
"""
return self.q(css=CLASS_SELECTORS['video_controls']).visible
def click_button(self, button_name, index=0, require_notification=False):
"""
Click on a button as specified by `button_name`
Arguments:
button_name (str): button name
index (int): query index
"""
self.q(css=BUTTON_SELECTORS[button_name]).nth(index).click()
if require_notification:
wait_for_notification(self)
self.wait_for_ajax()
@staticmethod
def file_path(filename):
"""
Construct file path to be uploaded to assets.
Arguments:
filename (str): asset filename
"""
return os.sep.join(__file__.split(os.sep)[:-5]) + '/data/uploads/' + filename
def upload_handout(self, handout_filename):
"""
Upload a handout file to assets
Arguments:
handout_filename (str): handout file name
"""
self.upload_asset(handout_filename)
def upload_asset(self, asset_filename, asset_type='handout', index=0):
"""
        Upload an asset file to assets
Arguments:
asset_filename (str): asset file name
asset_type (str): one of `handout`, `transcript`
index (int): query index
"""
asset_file_path = self.file_path(asset_filename)
self.click_button('upload_asset', index)
self.q(css=CLASS_SELECTORS['attach_asset']).results[0].send_keys(asset_file_path)
self.click_button('asset_submit')
        # Only srt format transcript files can be uploaded. If an error
        # occurs due to an incorrect transcript file, we will return from here
if asset_type == 'transcript' and self.q(css='#upload_error').present:
return
# confirm upload completion
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['upload_dialog']).present, 'Upload Completed')
def clear_handout(self):
"""
Clear handout from settings
"""
self.click_button('handout_clear')
def _get_handout(self, url):
"""
Download handout at `url`
"""
kwargs = dict()
session_id = [{i['name']: i['value']} for i in self.browser.get_cookies() if i['name'] == u'sessionid']
if session_id:
kwargs.update({
'cookies': session_id[0]
})
response = requests.get(url, **kwargs)
return response.status_code < 400, response.headers
def download_handout(self, mime_type, is_editor=False):
"""
Download handout with mime type specified by `mime_type`
Arguments:
mime_type (str): mime type of handout file
Returns:
tuple: Handout download result.
"""
selector = BUTTON_SELECTORS['handout_download_editor'] if is_editor else BUTTON_SELECTORS['handout_download']
handout_url = self.q(css=selector).attrs('href')[0]
result, headers = self._get_handout(handout_url)
return result, headers['content-type'] == mime_type
@property
def is_handout_button_visible(self):
"""
Check if handout download button is visible
"""
return self.q(css=BUTTON_SELECTORS['handout_download']).visible
def create_video(self):
"""
Create a Video Component by clicking on Video button and wait for rendering completion.
"""
# Create video
self.click_button('create_video', require_notification=True)
self.wait_for_video_component_render()
def xblocks(self):
"""
        Return the total number of video xblocks present on the current unit page.
Returns:
(int): total video xblocks
"""
return len(self.q(css='.xblock-header').filter(
lambda el: 'xblock-header-video' in el.get_attribute('class')).results)
def focus_caption_line(self, line_number):
"""
Focus a caption line as specified by `line_number`
Arguments:
line_number (int): caption line number
"""
caption_line_selector = ".subtitles > li[data-index='{index}']".format(index=line_number - 1)
self.q(css=caption_line_selector).results[0].send_keys(Keys.ENTER)
def is_caption_line_focused(self, line_number):
"""
        Check if a caption line is focused
Arguments:
line_number (int): caption line number
"""
caption_line_selector = ".subtitles > li[data-index='{index}']".format(index=line_number - 1)
attributes = self.q(css=caption_line_selector).attrs('class')
return 'focused' in attributes
@property
def is_slider_range_visible(self):
"""
Return True if slider range is visible.
"""
return self.q(css=CLASS_SELECTORS['slider_range']).visible
def verify_settings(self):
"""
Verify that video component has correct default settings.
"""
query = '.wrapper-comp-setting'
settings = self.q(css=query).results
if len(DEFAULT_SETTINGS) != len(settings):
return False
for counter, setting in enumerate(settings):
is_verified = self._verify_setting_entry(setting,
DEFAULT_SETTINGS[counter][0],
DEFAULT_SETTINGS[counter][1])
if not is_verified:
return is_verified
return True
@staticmethod
def _verify_setting_entry(setting, field_name, field_value):
"""
Verify a `setting` entry.
Arguments:
setting (WebElement): Selenium WebElement
field_name (str): Name of field
field_value (str): Value of field
Returns:
bool: Does `setting` have correct value.
"""
if field_name != setting.find_element_by_class_name('setting-label').get_attribute('innerHTML'):
return False
# Get class attribute values
classes = setting.get_attribute('class').split()
list_type_classes = ['metadata-list-enum', 'metadata-dict', 'metadata-video-translations']
is_list_type = any(list_type in classes for list_type in list_type_classes)
if is_list_type:
current_value = ', '.join(
ele.get_attribute('value') for ele in setting.find_elements_by_class_name('list-settings-item'))
elif 'metadata-videolist-enum' in setting.get_attribute('class'):
current_value = ', '.join(item.find_element_by_tag_name('input').get_attribute('value') for item in
setting.find_elements_by_class_name('videolist-settings-item'))
else:
current_value = setting.find_element_by_class_name('setting-input').get_attribute('value')
if field_value != current_value:
return False
        # Clear button should be visible (active class is present) for
        # every setting that doesn't have the 'metadata-videolist-enum' class
if 'metadata-videolist-enum' not in setting.get_attribute('class'):
setting_clear_button = setting.find_elements_by_class_name('setting-clear')[0]
if 'active' not in setting_clear_button.get_attribute('class'):
return False
return True
def set_field_value(self, field_name, field_value, field_type='input'):
"""
Set settings input `field` with `value`
Arguments:
field_name (str): Name of field
field_value (str): Name of value
field_type (str): `input`, `select` etc(more to be added later)
"""
query = '.wrapper-comp-setting > label:nth-child(1)'
field_id = ''
if field_type == 'input':
for index, _ in enumerate(self.q(css=query)):
if field_name in self.q(css=query).nth(index).text[0]:
field_id = self.q(css=query).nth(index).attrs('for')[0]
break
self.q(css='#{}'.format(field_id)).fill(field_value)
elif field_type == 'select':
self.q(css='select[name="{0}"] option[value="{1}"]'.format(field_name, field_value)).first.click()
def verify_field_value(self, field_name, field_value):
"""
Get settings value of `field_name`
Arguments:
field_name (str): Name of field
field_value (str): Name of value
Returns:
bool: If `field_name` has `field_value`
"""
_, setting = self._get_setting_entry(field_name)
return self._verify_setting_entry(setting, field_name, field_value)
def _get_setting_entry(self, field_name):
"""
Get setting entry of `field_name`
Arguments:
field_name (str): Name of field
Returns:
setting (WebElement): Selenium WebElement
"""
for index, setting in enumerate(self.q(css='.wrapper-comp-setting').results):
if setting.find_element_by_class_name('setting-label').get_attribute('innerHTML') == field_name:
return index, setting
def translations_count(self):
"""
Get count of translations.
"""
return len(self.q(css='.wrapper-translations-settings .list-settings-item').results)
def select_translation_language(self, language_code, index=0):
"""
Select translation language as specified by `language_code`
Arguments:
language_code (str):
index (int): query index
"""
translations_items = '.wrapper-translations-settings .list-settings-item'
language_selector = translations_items + ' select option[value="{}"]'.format(language_code)
self.q(css=language_selector).nth(index).click()
def upload_translation(self, transcript_name, language_code):
"""
Upload a translation file.
Arguments:
transcript_name (str):
language_code (str):
"""
self.click_button('translation_add')
translations_count = self.translations_count()
self.select_translation_language(language_code, translations_count - 1)
self.upload_asset(transcript_name, asset_type='transcript', index=translations_count - 1)
def replace_translation(self, old_lang_code, new_lang_code, transcript_name):
"""
Replace a translation.
Arguments:
old_lang_code (str):
new_lang_code (str):
transcript_name (str):
"""
language_codes = self.translations()
index = language_codes.index(old_lang_code)
self.select_translation_language(new_lang_code, index)
self.upload_asset(transcript_name, asset_type='transcript', index=index)
def translations(self):
"""
Extract translations
Returns:
list: list of translation language codes
"""
translations_selector = '.metadata-video-translations .remove-setting'
return self.q(css=translations_selector).attrs('data-lang')
def download_translation(self, language_code, text_to_search):
"""
Download a translation having `language_code` and containing `text_to_search`
Arguments:
language_code (str): language code
text_to_search (str): text to search in translation
Returns:
bool: whether download was successful
"""
mime_type = 'application/x-subrip'
lang_code = '/{}?'.format(language_code)
link = [link for link in self.q(css='.download-action').attrs('href') if lang_code in link]
result, headers, content = self._get_transcript(link[0])
return result is True and mime_type in headers['content-type'] and text_to_search in content.decode('utf-8')
def remove_translation(self, language_code):
"""
Remove a translation having `language_code`
Arguments:
language_code (str): language code
"""
self.q(css='.remove-action').filter(lambda el: language_code == el.get_attribute('data-lang')).click()
@property
def upload_status_message(self):
"""
Get asset upload status message
"""
return self.q(css='#upload_error').text[0]
def captions_lines(self):
"""
Extract partial caption lines.
        As all the caption lines are exactly the same, getting only partial lines will work.
"""
self.wait_for_captions()
selector = '.subtitles > li:nth-child({})'
return ' '.join([self.q(css=selector.format(i)).text[0] for i in range(1, 6)])
def set_url_field(self, url, field_number):
"""
Set video url field in basic settings tab.
Arguments:
url (str): video url
field_number (int): video url field number
"""
if self.q(css=CLASS_SELECTORS['collapse_bar']).visible is False:
self.click_button('collapse_link')
self.q(css=CLASS_SELECTORS['url_inputs']).nth(field_number - 1).fill(url)
time.sleep(DELAY)
self.wait_for_ajax()
def message(self, message_type):
"""
Get video url field status/error message.
Arguments:
message_type(str): type(status, error) of message
Returns:
str: status/error message
"""
if message_type == 'status':
self.wait_for_element_visibility(CLASS_SELECTORS[message_type],
'{} message is Visible'.format(message_type.title()))
return self.q(css=CLASS_SELECTORS[message_type]).text[0]
def url_field_status(self, *field_numbers):
"""
        Get video url field status (enabled/disabled).
        Arguments:
field_numbers (tuple or None): field numbers to check status for, None means get status for all.
tuple items will be integers and must start from 1
Returns:
dict: field numbers as keys and field status(bool) as values, False means a field is disabled
"""
if field_numbers:
index_list = [number - 1 for number in field_numbers]
else:
index_list = range(3) # maximum three fields
statuses = {}
for index in index_list:
status = 'is-disabled' not in self.q(css=CLASS_SELECTORS['url_inputs']).nth(index).attrs('class')[0]
statuses[index + 1] = status
return statuses
def clear_field(self, index):
"""
Clear a video url field at index specified by `index`.
"""
self.q(css=CLASS_SELECTORS['url_inputs']).nth(index - 1).fill('')
# Trigger an 'input' event after filling the field with an empty value.
self.browser.execute_script(
"$('{}:eq({})').trigger('{}')".format(CLASS_SELECTORS['url_inputs'], index, 'input'))
time.sleep(DELAY)
self.wait_for_ajax()
def clear_fields(self):
"""
Clear video url fields.
"""
script = """
$('{selector}')
.prop('disabled', false)
.removeClass('is-disabled')
.val('')
.trigger('input');
""".format(selector=CLASS_SELECTORS['url_inputs'])
self.browser.execute_script(script)
time.sleep(DELAY)
self.wait_for_ajax()
def revert_field(self, field_name):
"""
Revert a field.
"""
_, setting = self._get_setting_entry(field_name)
setting.find_element_by_class_name('setting-clear').click()
def is_transcript_button_visible(self, button_name, index=0, button_text=None):
"""
Check if a transcript related button is visible.
Arguments:
button_name (str): name of button
index (int): query index
button_text (str or None): text to match with text on a button, if None then don't match texts
Returns:
bool: is button visible
"""
is_visible = self.q(css=BUTTON_SELECTORS[button_name]).nth(index).visible
is_text_matched = True
if button_text and button_text != self.q(css=BUTTON_SELECTORS[button_name]).nth(index).text[0]:
is_text_matched = False
return is_visible and is_text_matched
def upload_transcript(self, transcript_filename):
"""
Upload a Transcript
Arguments:
transcript_filename (str): name of transcript file
"""
# Show the Browse Button
self.browser.execute_script("$('form.file-chooser').show()")
asset_file_path = self.file_path(transcript_filename)
self.q(css=CLASS_SELECTORS['attach_transcript']).results[0].send_keys(asset_file_path)
# confirm upload completion
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['attach_transcript']).visible, 'Upload Completed')
| agpl-3.0 |
brandonlee503/Intro-to-Computing-Projects | COMP1001_Assignment2_14010627X.py | 1 | 2082 | #Brandon Lee
#ID: 14010627X
#9/26/14
#Question 1 Pseudocode:
#function definition: def main():
#prompt user input for first name: firstName = input("String Prompt")
#prompt user input for last name: lastName = input("String Prompt")
#combine both inputs into a single string: fullName = firstName + lastName
#initialize variable to store length of string: stringLength = len(fullName)
#create for loop and use initialized variable for range: for i in range(0,stringLength):
#print the string: print(fullName[0:i+1])
#create another for loop for going backwards, same concept as before: for i in range(stringLength-1,0,-1)
#print the string: print(fullName[0:j])
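#example: with first name "Jo" and last name "Li" this prints the prefixes
#"J", "Jo", "Jo ", "Jo L", "Jo Li" and then shrinks back down to "J"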
def test1():
firstN = input("Please enter your first name: ")
lastN = input("Please enter your last name: ")
fullName = firstN + " " + lastN
strlen = len(fullName)
for i in range(0,strlen):
print(fullName[0:i+1])
for j in range(strlen-1,0,-1):
print(fullName[0:j])
#Question 2 Pseudocode:
#import math library: import math
#function definition: def main():
#prompt user to enter positive n value: n = eval(input("String Prompt"))
#initialize variable to store total value as for loop iterates: totalValue = 0
#print value of e and labels: print("Value of e = ", math.e, "Round: The approximated e: ")
#create for loop with range of user input: for i in range(n)
#initialize variable to store factorial of current round: factorialVariable = math.factorial(i)
#increment total value by adding its self with (1/current value factorial): totalValue = totalValue + (1/factorialValue)
#print round number and the total value: print(i, totalValue)
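#worked check: the partial sums of 1/k! converge quickly toward e = 2.71828...,
#so the printed rounds are 1.0, 2.0, 2.5, ~2.66667, ~2.70833, ...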
import math
def test2():
n = eval(input("Enter positive n value: "))
totalVal = 0
print("The value of e is:", math.e)
print("Round: The approximated e: ")
for i in range(n):
factVar = math.factorial(i) #0 factorial = 1
totalVal = totalVal + (1/factVar)
print(i+1, " ", totalVal) | mit |
alvaroaleman/ansible | lib/ansible/modules/cloud/softlayer/sl_vm.py | 12 | 11492 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: sl_vm
short_description: create or cancel a virtual instance in SoftLayer
description:
- Creates or cancels SoftLayer instances. When created, optionally waits for it to be 'running'.
version_added: "2.1"
options:
instance_id:
description:
      - Instance Id of the virtual instance on which to perform the action
required: false
default: null
hostname:
description:
- Hostname to be provided to a virtual instance
required: false
default: null
domain:
description:
- Domain name to be provided to a virtual instance
required: false
default: null
datacenter:
description:
- Datacenter for the virtual instance to be deployed
required: false
default: null
tags:
description:
- Tag or list of tags to be provided to a virtual instance
required: false
default: null
hourly:
description:
- Flag to determine if the instance should be hourly billed
required: false
default: true
private:
description:
- Flag to determine if the instance should be private only
required: false
default: false
dedicated:
description:
      - Flag to determine if the instance should be deployed in dedicated space
required: false
default: false
local_disk:
description:
- Flag to determine if local disk should be used for the new instance
required: false
default: true
cpus:
description:
- Count of cpus to be assigned to new virtual instance
required: true
default: null
memory:
description:
- Amount of memory to be assigned to new virtual instance
required: true
default: null
disks:
description:
- List of disk sizes to be assigned to new virtual instance
required: true
default: [25]
os_code:
description:
- OS Code to be used for new virtual instance
required: false
default: null
image_id:
description:
- Image Template to be used for new virtual instance
required: false
default: null
nic_speed:
description:
- NIC Speed to be assigned to new virtual instance
required: false
default: 10
public_vlan:
description:
- VLAN by its Id to be assigned to the public NIC
required: false
default: null
private_vlan:
description:
- VLAN by its Id to be assigned to the private NIC
required: false
default: null
ssh_keys:
description:
- List of ssh keys by their Id to be assigned to a virtual instance
required: false
default: null
post_uri:
description:
- URL of a post provisioning script to be loaded and executed on virtual instance
required: false
default: null
state:
description:
- Create, or cancel a virtual instance. Specify "present" for create, "absent" to cancel.
required: false
default: 'present'
wait:
description:
- Flag used to wait for active status before returning
required: false
default: true
wait_timeout:
description:
- time in seconds before wait returns
required: false
default: 600
requirements:
- "python >= 2.6"
- "softlayer >= 4.1.1"
author: "Matt Colton (@mcltn)"
'''
EXAMPLES = '''
- name: Build instance
hosts: localhost
gather_facts: False
tasks:
- name: Build instance request
sl_vm:
hostname: instance-1
domain: anydomain.com
datacenter: dal09
tags: ansible-module-test
hourly: True
private: False
dedicated: False
local_disk: True
cpus: 1
memory: 1024
disks: [25]
os_code: UBUNTU_LATEST
wait: False
- name: Build additional instances
hosts: localhost
gather_facts: False
tasks:
- name: Build instances request
sl_vm:
hostname: "{{ item.hostname }}"
domain: "{{ item.domain }}"
datacenter: "{{ item.datacenter }}"
tags: "{{ item.tags }}"
hourly: "{{ item.hourly }}"
private: "{{ item.private }}"
dedicated: "{{ item.dedicated }}"
local_disk: "{{ item.local_disk }}"
cpus: "{{ item.cpus }}"
memory: "{{ item.memory }}"
disks: "{{ item.disks }}"
os_code: "{{ item.os_code }}"
ssh_keys: "{{ item.ssh_keys }}"
wait: "{{ item.wait }}"
with_items:
- hostname: instance-2
domain: anydomain.com
datacenter: dal09
tags:
- ansible-module-test
- ansible-module-test-slaves
hourly: True
private: False
dedicated: False
local_disk: True
cpus: 1
memory: 1024
disks:
- 25
- 100
os_code: UBUNTU_LATEST
ssh_keys: []
wait: True
- hostname: instance-3
domain: anydomain.com
datacenter: dal09
tags:
- ansible-module-test
- ansible-module-test-slaves
hourly: True
private: False
dedicated: False
local_disk: True
cpus: 1
memory: 1024
disks:
- 25
- 100
os_code: UBUNTU_LATEST
ssh_keys: []
wait: True
- name: Cancel instances
hosts: localhost
gather_facts: False
tasks:
- name: Cancel by tag
sl_vm:
state: absent
tags: ansible-module-test
'''
# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
RETURN = '''# '''
import time
#TODO: get this info from API
STATES = ['present', 'absent']
DATACENTERS = ['ams01','ams03','che01','dal01','dal05','dal06','dal09','dal10','fra02','hkg02','hou02','lon02','mel01','mex01','mil01','mon01','osl01','par01','sjc01','sjc03','sao01','sea01','sng01','syd01','tok02','tor01','wdc01','wdc04']
CPU_SIZES = [1,2,4,8,16,32,56]
MEMORY_SIZES = [1024,2048,4096,6144,8192,12288,16384,32768,49152,65536,131072,247808]
INITIALDISK_SIZES = [25,100]
LOCALDISK_SIZES = [25,100,150,200,300]
SANDISK_SIZES = [10,20,25,30,40,50,75,100,125,150,175,200,250,300,350,400,500,750,1000,1500,2000]
NIC_SPEEDS = [10,100,1000]
try:
import SoftLayer
from SoftLayer import VSManager
HAS_SL = True
vsManager = VSManager(SoftLayer.create_client_from_env())
except ImportError:
HAS_SL = False
def create_virtual_instance(module):
instances = vsManager.list_instances(
hostname = module.params.get('hostname'),
domain = module.params.get('domain'),
datacenter = module.params.get('datacenter')
)
if instances:
return False, None
# Check if OS or Image Template is provided (Can't be both, defaults to OS)
if (module.params.get('os_code') != None and module.params.get('os_code') != ''):
module.params['image_id'] = ''
elif (module.params.get('image_id') != None and module.params.get('image_id') != ''):
module.params['os_code'] = ''
module.params['disks'] = [] # Blank out disks since it will use the template
else:
return False, None
tags = module.params.get('tags')
if isinstance(tags, list):
tags = ','.join(map(str, module.params.get('tags')))
instance = vsManager.create_instance(
hostname = module.params.get('hostname'),
domain = module.params.get('domain'),
cpus = module.params.get('cpus'),
memory = module.params.get('memory'),
hourly = module.params.get('hourly'),
datacenter = module.params.get('datacenter'),
os_code = module.params.get('os_code'),
image_id = module.params.get('image_id'),
local_disk = module.params.get('local_disk'),
disks = module.params.get('disks'),
ssh_keys = module.params.get('ssh_keys'),
nic_speed = module.params.get('nic_speed'),
private = module.params.get('private'),
public_vlan = module.params.get('public_vlan'),
private_vlan = module.params.get('private_vlan'),
dedicated = module.params.get('dedicated'),
post_uri = module.params.get('post_uri'),
tags = tags)
if instance != None and instance['id'] > 0:
return True, instance
else:
return False, None
def wait_for_instance(module,id):
instance = None
completed = False
wait_timeout = time.time() + module.params.get('wait_time')
while not completed and wait_timeout > time.time():
try:
completed = vsManager.wait_for_ready(id, 10, 2)
if completed:
instance = vsManager.get_instance(id)
except:
completed = False
return completed, instance
def cancel_instance(module):
canceled = True
if module.params.get('instance_id') == None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
tags = module.params.get('tags')
if isinstance(tags, basestring):
tags = [module.params.get('tags')]
instances = vsManager.list_instances(tags = tags, hostname = module.params.get('hostname'), domain = module.params.get('domain'))
for instance in instances:
try:
vsManager.cancel_instance(instance['id'])
except:
canceled = False
elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
try:
      vsManager.cancel_instance(module.params.get('instance_id'))
except:
canceled = False
else:
return False, None
return canceled, None
def main():
module = AnsibleModule(
argument_spec=dict(
instance_id=dict(),
hostname=dict(),
domain=dict(),
datacenter=dict(choices=DATACENTERS),
tags=dict(),
hourly=dict(type='bool', default=True),
private=dict(type='bool', default=False),
dedicated=dict(type='bool', default=False),
local_disk=dict(type='bool', default=True),
cpus=dict(type='int', choices=CPU_SIZES),
memory=dict(type='int', choices=MEMORY_SIZES),
disks=dict(type='list', default=[25]),
os_code=dict(),
image_id=dict(),
nic_speed=dict(type='int', choices=NIC_SPEEDS),
public_vlan=dict(),
private_vlan=dict(),
ssh_keys=dict(type='list', default=[]),
post_uri=dict(),
state=dict(default='present', choices=STATES),
wait=dict(type='bool', default=True),
wait_time=dict(type='int', default=600)
)
)
if not HAS_SL:
module.fail_json(msg='softlayer python library required for this module')
if module.params.get('state') == 'absent':
(changed, instance) = cancel_instance(module)
elif module.params.get('state') == 'present':
(changed, instance) = create_virtual_instance(module)
if module.params.get('wait') == True and instance:
(changed, instance) = wait_for_instance(module, instance['id'])
module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
Ensighten/Diamond | src/collectors/nfs/nfs.py | 16 | 8775 | # coding=utf-8
"""
The NfsCollector collects nfs utilization metrics using /proc/net/rpc/nfs.
#### Dependencies
* /proc/net/rpc/nfs
"""
import diamond.collector
import os
class NfsCollector(diamond.collector.Collector):
PROC = '/proc/net/rpc/nfs'
def get_default_config_help(self):
config_help = super(NfsCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NfsCollector, self).get_default_config()
config.update({
'path': 'nfs'
})
return config
def collect(self):
"""
Collect stats
"""
if os.access(self.PROC, os.R_OK):
results = {}
# Open file
file = open(self.PROC)
for line in file:
line = line.split()
if line[0] == 'net':
results['net.packets'] = line[1]
results['net.udpcnt'] = line[2]
results['net.tcpcnt'] = line[3]
results['net.tcpconn'] = line[4]
elif line[0] == 'rpc':
results['rpc.calls'] = line[1]
results['rpc.retrans'] = line[2]
results['rpc.authrefrsh'] = line[3]
elif line[0] == 'proc2':
line.pop(1) # remove column-cnt field
results['v2.null'] = line[1]
results['v2.getattr'] = line[2]
results['v2.setattr'] = line[3]
results['v2.root'] = line[4]
results['v2.lookup'] = line[5]
results['v2.readlink'] = line[6]
results['v2.read'] = line[7]
results['v2.wrcache'] = line[8]
results['v2.write'] = line[9]
results['v2.create'] = line[10]
results['v2.remove'] = line[11]
results['v2.rename'] = line[12]
results['v2.link'] = line[13]
results['v2.symlink'] = line[14]
results['v2.mkdir'] = line[15]
results['v2.rmdir'] = line[16]
results['v2.readdir'] = line[17]
results['v2.fsstat'] = line[18]
elif line[0] == 'proc3':
line.pop(1) # remove column-cnt field
results['v3.null'] = line[1]
results['v3.getattr'] = line[2]
results['v3.setattr'] = line[3]
results['v3.lookup'] = line[4]
results['v3.access'] = line[5]
results['v3.readlink'] = line[6]
results['v3.read'] = line[7]
results['v3.write'] = line[8]
results['v3.create'] = line[9]
results['v3.mkdir'] = line[10]
results['v3.symlink'] = line[11]
results['v3.mknod'] = line[12]
results['v3.remove'] = line[13]
results['v3.rmdir'] = line[14]
results['v3.rename'] = line[15]
results['v3.link'] = line[16]
results['v3.readdir'] = line[17]
results['v3.readdirplus'] = line[18]
results['v3.fsstat'] = line[19]
results['v3.fsinfo'] = line[20]
results['v3.pathconf'] = line[21]
results['v3.commit'] = line[22]
elif line[0] == 'proc4':
line.pop(1) # remove column-cnt field
results['v4.null'] = line[1]
results['v4.read'] = line[2]
results['v4.write'] = line[3]
results['v4.commit'] = line[4]
results['v4.open'] = line[5]
results['v4.open_conf'] = line[6]
results['v4.open_noat'] = line[7]
results['v4.open_dgrd'] = line[8]
results['v4.close'] = line[9]
results['v4.setattr'] = line[10]
results['v4.fsinfo'] = line[11]
results['v4.renew'] = line[12]
results['v4.setclntid'] = line[13]
results['v4.confirm'] = line[14]
results['v4.lock'] = line[15]
results['v4.lockt'] = line[16]
results['v4.locku'] = line[17]
results['v4.access'] = line[18]
results['v4.getattr'] = line[19]
results['v4.lookup'] = line[20]
results['v4.lookup_root'] = line[21]
results['v4.remove'] = line[22]
results['v4.rename'] = line[23]
results['v4.link'] = line[24]
results['v4.symlink'] = line[25]
results['v4.create'] = line[26]
results['v4.pathconf'] = line[27]
results['v4.statfs'] = line[28]
results['v4.readlink'] = line[29]
results['v4.readdir'] = line[30]
try:
results['v4.server_caps'] = line[31]
except IndexError:
pass
try:
results['v4.delegreturn'] = line[32]
except IndexError:
pass
try:
results['v4.getacl'] = line[33]
except IndexError:
pass
try:
results['v4.setacl'] = line[34]
except IndexError:
pass
try:
results['v4.fs_locations'] = line[35]
except IndexError:
pass
try:
results['v4.rel_lkowner'] = line[36]
except IndexError:
pass
try:
results['v4.exchange_id'] = line[37]
except IndexError:
pass
try:
results['v4.create_ses'] = line[38]
except IndexError:
pass
try:
results['v4.destroy_ses'] = line[39]
except IndexError:
pass
try:
results['v4.sequence'] = line[40]
except IndexError:
pass
try:
results['v4.get_lease_t'] = line[41]
except IndexError:
pass
try:
results['v4.reclaim_comp'] = line[42]
except IndexError:
pass
try:
results['v4.layoutget'] = line[43]
except IndexError:
pass
try:
results['v4.layoutcommit'] = line[44]
except IndexError:
pass
try:
results['v4.layoutreturn'] = line[45]
except IndexError:
pass
try:
results['v4.getdevlist'] = line[46]
except IndexError:
pass
try:
results['v4.getdevinfo'] = line[47]
except IndexError:
pass
try:
results['v4.ds_write'] = line[48]
except IndexError:
pass
try:
results['v4.ds_commit'] = line[49]
except IndexError:
pass
try:
results['v4.getdevlist'] = line[50]
except IndexError:
pass
# Close File
file.close()
for stat in results.keys():
metric_name = stat
metric_value = long(float(results[stat]))
metric_value = self.derivative(metric_name, metric_value)
self.publish(metric_name, metric_value, precision=3)
return True
return False
| mit |
liweitianux/chandra-acis-analysis | acispy/spectrum.py | 1 | 1572 | # Copyright (c) 2017 Weitian LI <[email protected]>
# MIT license
"""
Chandra ACIS spectrum.
"""
from astropy.io import fits
from .acis import ACIS
class Spectrum:
"""
Chandra ACIS spectrum
"""
def __init__(self, filepath):
self.filepath = filepath
self.fitsobj = fits.open(filepath)
ext_spec = self.fitsobj["SPECTRUM"]
self.header = ext_spec.header
# spectral data
self.channel = ext_spec.data.columns["CHANNEL"].array
self.counts = ext_spec.data.columns["COUNTS"].array
# spectral keywords
self.EXPOSURE = self.header.get("EXPOSURE")
self.BACKSCAL = self.header.get("BACKSCAL")
def calc_flux(self, elow, ehigh, verbose=False):
"""
Calculate the flux:
            flux = counts / exposure / backscale
Parameters
----------
        elow, ehigh : float
Lower and upper energy limit to calculate the flux.
"""
chlow = ACIS.energy2channel(elow)
chhigh = ACIS.energy2channel(ehigh)
counts = self.counts[(chlow-1):chhigh].sum()
if verbose:
print("counts / exposure / backscale :: %d / %.1f / %.5g" %
(counts, self.EXPOSURE, self.BACKSCAL))
flux = counts / self.EXPOSURE / self.BACKSCAL
return flux
def calc_pb_flux(self, elow=9500, ehigh=12000, verbose=False):
"""
Calculate the particle background (default: 9.5-12 keV) flux.
"""
return self.calc_flux(elow=elow, ehigh=ehigh, verbose=verbose)
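# A minimal usage sketch (the file name is hypothetical; energies are in eV,
# matching the 9500-12000 defaults of calc_pb_flux):
#
#   spec = Spectrum("acis_spectrum.pi")
#   pb_flux = spec.calc_pb_flux(verbose=True)        # 9.5-12 keV particle background
#   band_flux = spec.calc_flux(elow=700, ehigh=7000)  # 0.7-7 keV band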
| mit |
slightperturbation/Cobalt | ext/emsdk_portable/emscripten/1.27.0/tools/jsrun.py | 1 | 2085 | import time, os, sys, logging
from subprocess import Popen, PIPE, STDOUT
TRACK_PROCESS_SPAWNS = True if (os.getenv('EM_BUILD_VERBOSE') and int(os.getenv('EM_BUILD_VERBOSE')) >= 3) else False
def timeout_run(proc, timeout=None, note='unnamed process', full_output=False):
start = time.time()
if timeout is not None:
while time.time() - start < timeout and proc.poll() is None:
time.sleep(0.1)
if proc.poll() is None:
      proc.kill() # XXX bug: killing emscripten.py does not kill its child process!
raise Exception("Timed out: " + note)
out = proc.communicate()
out = map(lambda o: '' if o is None else o, out)
if TRACK_PROCESS_SPAWNS:
logging.info('Process ' + str(proc.pid) + ' finished after ' + str(time.time() - start) + ' seconds. Exit code: ' + str(proc.returncode))
return '\n'.join(out) if full_output else out[0]
def run_js(filename, engine=None, args=[], check_timeout=False, stdin=None, stdout=PIPE, stderr=None, cwd=None, full_output=False, assert_returncode=0, error_limit=-1):
if type(engine) is not list:
engine = [engine]
command = engine + [filename] + (['--'] if 'd8' in engine[0] or 'jsc' in engine[0] else []) + args
try:
if cwd is not None: os.environ['EMCC_BUILD_DIR'] = os.getcwd()
proc = Popen(
command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd)
finally:
if cwd is not None: del os.environ['EMCC_BUILD_DIR']
timeout = 15*60 if check_timeout else None
if TRACK_PROCESS_SPAWNS:
logging.info('Blocking on process ' + str(proc.pid) + ': ' + str(command) + (' for ' + str(timeout) + ' seconds' if timeout else ' until it finishes.'))
ret = timeout_run(
proc,
timeout,
'Execution',
full_output=full_output)
if assert_returncode is not None and proc.returncode is not assert_returncode:
raise Exception('Expected the command ' + str(command) + ' to finish with return code ' + str(assert_returncode) + ', but it returned with code ' + str(proc.returncode) + ' instead! Output: ' + str(ret)[:error_limit])
return ret
| apache-2.0 |
barak/ciml | projects/p1/runClassifier.py | 4 | 6063 | """
This module is for training, testing and evaluating classifiers.
"""
from numpy import *
from pylab import *
import sys
import util
import binary
def trainTest(classifier, X, Y, Xtest, Ytest):
"""
Train a classifier on data (X,Y) and evaluate on
data (Xtest,Ytest). Return a triple of:
* Training data accuracy
* Test data accuracy
* Individual predictions on Xtest.
"""
classifier.reset() # initialize the classifier
classifier.train(X, Y); # train it
#print "Learned Classifier:"
#print classifier
Ypred = classifier.predictAll(X); # predict the training data
trAcc = mean((Y >= 0) == (Ypred >= 0)); # check to see how often the predictions are right
    Ypred = classifier.predictAll(Xtest);    # predict the test data
teAcc = mean((Ytest >= 0) == (Ypred >= 0)); # check to see how often the predictions are right
print "Training accuracy %g, test accuracy %g" % (trAcc, teAcc)
return (trAcc, teAcc, Ypred)
def trainTestSet(classifier, dataset):
trainTest(classifier, dataset.X, dataset.Y, dataset.Xte, dataset.Yte)
def learningCurve(classifier, X, Y, Xtest, Ytest):
"""
Generate a learning curve by repeatedly halving the amount of
training data until none is left.
We return a triple containing:
* The sizes of data sets we trained on
* The training accuracies at each level
* The test accuracies at each level
"""
N = X.shape[0] # how many total points?
M = int(ceil(log2(N))) # how many classifiers will we have to train?
dataSizes = zeros(M)
trainAcc = zeros(M)
testAcc = zeros(M)
for i in range(1, M+1): # loop over "skip lengths"
# select every 2^(M-i)th point
ids = arange(0, N, 2**(M-i))
Xtr = X[ids, :]
Ytr = Y[ids]
# report what we're doing
print "Training classifier on %d points..." % ids.size
# train the classifier
(trAcc, teAcc, Ypred) = trainTest(classifier, Xtr, Ytr, Xtest, Ytest)
# store the results
dataSizes[i-1] = ids.size
trainAcc[i-1] = trAcc
testAcc[i-1] = teAcc
return (dataSizes, trainAcc, testAcc)
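# For example, with N = 100 training points the loop above trains on nested
# subsets of sizes 2, 4, 7, 13, 25, 50 and 100 (every 2**(M-i)-th point,
# where M = ceil(log2(N)) = 7).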
def learningCurveSet(classifier, dataset):
return learningCurve(classifier, dataset.X, dataset.Y, dataset.Xte, dataset.Yte)
def hyperparamCurve(classifier, hpName, hpValues, X, Y, Xtest, Ytest):
M = len(hpValues)
trainAcc = zeros(M)
testAcc = zeros(M)
for m in range(M):
# report what we're doing
print "Training classifier with %s=%g..." % (hpName, hpValues[m])
# train the classifier
classifier.setOption(hpName, hpValues[m])
classifier.reset()
(trAcc, teAcc, Ypred) = trainTest(classifier, X, Y, Xtest, Ytest)
# store the results
trainAcc[m] = trAcc
testAcc[m] = teAcc
return (hpValues, trainAcc, testAcc)
def hyperparamCurveSet(classifier, hpName, hpValues, dataset):
return hyperparamCurve(classifier, hpName, hpValues, dataset.X, dataset.Y, dataset.Xte, dataset.Yte)
def plotCurve(titleString, res):
plot(res[0], res[1], 'b-',
res[0], res[2], 'r-')
legend( ('Train', 'Test') )
#xlabel('# of training points')
ylabel('Accuracy')
title(titleString)
show()
def shufflePoints(X, Y):
"""
Randomize the order of the points.
"""
[N,D] = X.shape
order = range(N)
util.permute(order)
retX = X[order,:]
retY = Y[order]
return (retX, retY)
def plotData(X, Y):
plot(X[Y>=0,0], X[Y>=0,1], 'bo',
X[Y< 0,0], X[Y< 0,1], 'rx')
legend( ('+1', '-1') )
show(False)
def plotClassifier(w, b):
axes = figure(1).get_axes()[0]
xlim = axes.get_xlim()
ylim = axes.get_ylim()
xmin = xlim[0] + (xlim[1] - xlim[0]) / 100
xmax = xlim[1] - (xlim[1] - xlim[0]) / 100
ymin = ylim[0] + (ylim[1] - ylim[0]) / 100
ymax = ylim[1] - (ylim[1] - ylim[0]) / 100
# find the zeros along each axis
# w0*l + w1*? + b = 0 ==> ? = -(b + w0*l) / w1
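    # e.g. with w = (1, 2), b = -3 and l = 1, the boundary crosses y = -(-3 + 1*1) / 2 = 1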
xmin_zero = - (b + w[0] * xmin) / w[1]
xmax_zero = - (b + w[0] * xmax) / w[1]
ymin_zero = - (b + w[1] * ymin) / w[0]
ymax_zero = - (b + w[1] * ymax) / w[0]
# now, two of these should actually be in bounds, figure out which
inBounds = []
if ylim[0] <= xmin_zero and xmin_zero <= ylim[1]:
inBounds.append( (xmin, xmin_zero) )
if ylim[0] <= xmax_zero and xmax_zero <= ylim[1]:
inBounds.append( (xmax, xmax_zero) )
if xlim[0] <= ymin_zero and ymin_zero <= xlim[1]:
inBounds.append( (ymin_zero, ymin) )
if xlim[0] <= ymax_zero and ymax_zero <= xlim[1]:
inBounds.append( (ymax_zero, ymax) )
plot( array([inBounds[0][0], inBounds[1][0]]), array([inBounds[0][1], inBounds[1][1]]), 'g-', linewidth=2 )
show(False)
#print axes
#figure(1).set_axes(axes)
def dumpMegamFormat(fname, Xtr, Ytr, Xte, Yte):
def writeIt(f, X, Y):
N,D = X.shape
for n in range(N):
f.write(str(Y[n]))
for d in range(D):
if X[n,d] != 0:
f.write(" f" + str(d) + " " + str(X[n,d]))
f.write("\n")
f = open(fname, 'w')
writeIt(f, Xtr, Ytr)
f.write("TEST\n")
writeIt(f, Xte, Yte)
f.close()
def dumpMegamFormatSet(fname, dataset):
dumpMegamFormat(fname, dataset.X, dataset.Y, dataset.Xte, dataset.Yte)
def dumpSVMFormat(fname, Xtr, Ytr, Xte, Yte):
def writeIt(f, X, Y):
N,D = X.shape
for n in range(N):
f.write(str(Y[n]))
for d in range(D):
if X[n,d] != 0:
f.write(" " + str(d+1) + ":" + str(X[n,d]))
f.write("\n")
f = open(fname, 'w')
writeIt(f, Xtr, Ytr)
writeIt(f, Xte, Yte)
f.close()
def dumpSVMFormatSet(fname, dataset):
dumpSVMFormat(fname, dataset.X, dataset.Y, dataset.Xte, dataset.Yte)
| gpl-2.0 |
deveninfotech/deven-frappe | frappe/website/doctype/blog_post/test_blog_post.py | 29 | 6159 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""Use blog post test to test user permissions logic"""
import frappe
import frappe.defaults
import unittest
import json
import frappe.model.meta
from frappe.core.page.user_permissions.user_permissions import add, remove, get_permissions
from frappe.permissions import clear_user_permissions_for_doctype, get_doc_permissions
test_records = frappe.get_test_records('Blog Post')
test_dependencies = ["User"]
class TestBlogPost(unittest.TestCase):
def setUp(self):
frappe.clear_cache(doctype="Blog Post")
user = frappe.get_doc("User", "[email protected]")
user.add_roles("Website Manager")
user = frappe.get_doc("User", "[email protected]")
user.add_roles("Blogger")
frappe.set_user("[email protected]")
def tearDown(self):
frappe.set_user("Administrator")
frappe.db.set_value("Blogger", "_Test Blogger 1", "user", None)
clear_user_permissions_for_doctype("Blog Category")
clear_user_permissions_for_doctype("Blog Post")
clear_user_permissions_for_doctype("Blogger")
frappe.db.sql("""update `tabDocPerm` set user_permission_doctypes=null
where parent='Blog Post' and permlevel=0 and apply_user_permissions=1
and `read`=1""")
def test_basic_permission(self):
post = frappe.get_doc("Blog Post", "_test-blog-post")
self.assertTrue(post.has_permission("read"))
def test_user_permissions_in_doc(self):
frappe.permissions.add_user_permission("Blog Category", "_Test Blog Category 1",
"[email protected]")
frappe.set_user("[email protected]")
post = frappe.get_doc("Blog Post", "_test-blog-post")
self.assertFalse(post.has_permission("read"))
self.assertFalse(get_doc_permissions(post).get("read"))
post1 = frappe.get_doc("Blog Post", "_test-blog-post-1")
self.assertTrue(post1.has_permission("read"))
self.assertTrue(get_doc_permissions(post1).get("read"))
def test_user_permissions_in_report(self):
frappe.permissions.add_user_permission("Blog Category", "_Test Blog Category 1", "[email protected]")
frappe.set_user("[email protected]")
names = [d.name for d in frappe.get_list("Blog Post", fields=["name", "blog_category"])]
self.assertTrue("_test-blog-post-1" in names)
self.assertFalse("_test-blog-post" in names)
def test_default_values(self):
frappe.permissions.add_user_permission("Blog Category", "_Test Blog Category 1", "[email protected]")
frappe.set_user("[email protected]")
doc = frappe.new_doc("Blog Post")
self.assertEquals(doc.get("blog_category"), "_Test Blog Category 1")
def test_user_link_match_doc(self):
blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
blogger.user = "[email protected]"
blogger.save()
frappe.set_user("[email protected]")
post = frappe.get_doc("Blog Post", "_test-blog-post-2")
self.assertTrue(post.has_permission("read"))
post1 = frappe.get_doc("Blog Post", "_test-blog-post-1")
self.assertFalse(post1.has_permission("read"))
def test_user_link_match_report(self):
blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
blogger.user = "[email protected]"
blogger.save()
frappe.set_user("[email protected]")
names = [d.name for d in frappe.get_list("Blog Post", fields=["name", "owner"])]
self.assertTrue("_test-blog-post-2" in names)
self.assertFalse("_test-blog-post-1" in names)
def test_set_user_permissions(self):
frappe.set_user("[email protected]")
add("[email protected]", "Blog Post", "_test-blog-post")
def test_not_allowed_to_set_user_permissions(self):
frappe.set_user("[email protected]")
# this user can't add user permissions
self.assertRaises(frappe.PermissionError, add,
"[email protected]", "Blog Post", "_test-blog-post")
def test_read_if_explicit_user_permissions_are_set(self):
self.test_set_user_permissions()
frappe.set_user("[email protected]")
# user can only access permitted blog post
doc = frappe.get_doc("Blog Post", "_test-blog-post")
self.assertTrue(doc.has_permission("read"))
# and not this one
doc = frappe.get_doc("Blog Post", "_test-blog-post-1")
self.assertFalse(doc.has_permission("read"))
def test_not_allowed_to_remove_user_permissions(self):
self.test_set_user_permissions()
defname = get_permissions("[email protected]", "Blog Post", "_test-blog-post")[0].name
frappe.set_user("[email protected]")
# user cannot remove their own user permissions
self.assertRaises(frappe.PermissionError, remove,
"[email protected]", defname, "Blog Post", "_test-blog-post")
def test_user_permissions_based_on_blogger(self):
frappe.set_user("[email protected]")
doc = frappe.get_doc("Blog Post", "_test-blog-post-1")
self.assertTrue(doc.has_permission("read"))
frappe.set_user("[email protected]")
add("[email protected]", "Blog Post", "_test-blog-post")
frappe.set_user("[email protected]")
doc = frappe.get_doc("Blog Post", "_test-blog-post-1")
self.assertFalse(doc.has_permission("read"))
doc = frappe.get_doc("Blog Post", "_test-blog-post")
self.assertTrue(doc.has_permission("read"))
def test_set_only_once(self):
blog_post = frappe.get_meta("Blog Post")
blog_post.get_field("title").set_only_once = 1
doc = frappe.get_doc("Blog Post", "_test-blog-post-1")
doc.title = "New"
self.assertRaises(frappe.CannotChangeConstantError, doc.save)
blog_post.get_field("title").set_only_once = 0
def test_user_permission_doctypes(self):
frappe.permissions.add_user_permission("Blog Category", "_Test Blog Category 1",
"[email protected]")
frappe.permissions.add_user_permission("Blogger", "_Test Blogger 1",
"[email protected]")
frappe.set_user("[email protected]")
frappe.db.sql("""update `tabDocPerm` set user_permission_doctypes=%s
where parent='Blog Post' and permlevel=0 and apply_user_permissions=1
and `read`=1""", json.dumps(["Blogger"]))
frappe.model.meta.clear_cache("Blog Post")
doc = frappe.get_doc("Blog Post", "_test-blog-post")
self.assertFalse(doc.has_permission("read"))
doc = frappe.get_doc("Blog Post", "_test-blog-post-2")
self.assertTrue(doc.has_permission("read"))
frappe.model.meta.clear_cache("Blog Post")
| mit |
cyclecomputing/boto | tests/unit/s3/test_tagging.py | 136 | 1526 | from tests.unit import AWSMockServiceTestCase
from boto.s3.connection import S3Connection
from boto.s3.bucket import Bucket
from boto.s3.tagging import Tag
class TestS3Tagging(AWSMockServiceTestCase):
connection_class = S3Connection
def default_body(self):
return """
<Tagging>
<TagSet>
<Tag>
<Key>Project</Key>
<Value>Project One</Value>
</Tag>
<Tag>
<Key>User</Key>
<Value>jsmith</Value>
</Tag>
</TagSet>
</Tagging>
"""
def test_parse_tagging_response(self):
self.set_http_response(status_code=200)
b = Bucket(self.service_connection, 'mybucket')
api_response = b.get_tags()
# The outer list is a list of tag sets.
self.assertEqual(len(api_response), 1)
# The inner list is a list of tags.
self.assertEqual(len(api_response[0]), 2)
self.assertEqual(api_response[0][0].key, 'Project')
self.assertEqual(api_response[0][0].value, 'Project One')
self.assertEqual(api_response[0][1].key, 'User')
self.assertEqual(api_response[0][1].value, 'jsmith')
def test_tag_equality(self):
t1 = Tag('foo', 'bar')
t2 = Tag('foo', 'bar')
t3 = Tag('foo', 'baz')
t4 = Tag('baz', 'bar')
self.assertEqual(t1, t2)
self.assertNotEqual(t1, t3)
self.assertNotEqual(t1, t4)
| mit |
MTG/essentia | test/src/unittests/spectral/test_tensorflowinputmusicnn.py | 1 | 3641 | #!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
import numpy as np
class TestTensorflowInputMusiCNN(TestCase):
def testZeroSpectrum(self):
# Inputting zeros should return zero.
size = 512
self.assertEqualVector(TensorflowInputMusiCNN()(zeros(size)), zeros(96))
def testRegression(self):
# Hardcoded analysis parameters
sampleRate = 16000
frameSize = 512
hopSize = 256
audio = MonoLoader(filename=join(testdata.audio_dir, 'recorded/vignesh.wav'),
sampleRate=sampleRate)()
expected = [0.60742337, 0.30574673, 0.45560792, 1.256332, 2.4021673, 3.4365354,
3.619476, 3.0021546, 2.1846066, 1.5598421, 1.4810421, 2.2823677,
2.679103, 2.86526, 2.6989846, 2.2553382, 2.219071, 2.3255587,
2.8502884, 2.9091403, 2.7634032, 2.4637504, 1.8271459, 1.522163,
1.7100089, 1.8728845, 1.6959977, 1.3508593, 0.9608341, 0.9133418,
1.0304681, 1.493988, 1.6636051, 1.4825928, 1.1171728, 0.93050385,
1.2989489, 1.7412357, 1.7828379, 1.5357956, 1.0274258, 1.4541839,
1.8527577, 1.8838495, 1.4812496, 1.4385983, 2.1568356, 2.3677773,
1.9438239, 1.5913178, 1.8563453, 1.7012404, 1.1431638, 1.0995349,
1.1092283, 0.74655735, 0.6698305, 0.7290597, 0.47290954, 0.64479357,
0.7136836, 0.9934933, 1.3321629, 1.1683794, 1.2097421, 1.1075293,
1.0301174, 0.9288259, 0.8876033, 0.8086145, 0.9854008, 1.0852002,
1.2092237, 1.2816739, 1.2066866, 0.52382684, 0.1494276, 0.08070073,
0.09443883, 0.12541461, 0.11942478, 0.1558171, 0.17869301, 0.36044103,
0.5242918, 0.7467586, 0.8322874, 0.7977463, 0.8188014, 0.80939233,
0.74459517, 0.5341967, 0.4339693, 0.33098528, 0.10355855, 0.00549104]
tfmf = TensorflowInputMusiCNN()
frames = [tfmf(frame) for frame in FrameGenerator(audio,
frameSize=frameSize,
hopSize=hopSize)]
obtained = numpy.mean(array(frames), axis=0)
self.assertAlmostEqualVector(obtained, expected, 1e-2)
def testInvalidInput(self):
self.assertComputeFails(TensorflowInputMusiCNN(), [])
def testWrongInputSize(self):
# mel bands should fail for input size different to 512
self.assertComputeFails(TensorflowInputMusiCNN(), [0.5] * 1)
self.assertComputeFails(TensorflowInputMusiCNN(), [0.5] * 514)
suite = allTests(TestTensorflowInputMusiCNN)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 |
KDB2/OpenReliability | veusz/qtwidgets/__init__.py | 8 | 1599 | # Copyright (C) 2011 Jeremy S. Sanders
# Email: Jeremy Sanders <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""Veusz qtwidgets module."""
# insert history combo into the list of modules so that it can be found
# by loadUi - yuck
import sys
from . import historycombo
from . import historycheck
from . import historyvaluecombo
from . import historygroupbox
from . import historyspinbox
from . import recentfilesbutton
from . import lineeditwithclear
sys.modules['historycombo'] = historycombo
sys.modules['historycheck'] = historycheck
sys.modules['historyvaluecombo'] = historyvaluecombo
sys.modules['historygroupbox'] = historygroupbox
sys.modules['historyspinbox'] = historyspinbox
sys.modules['recentfilesbutton'] = recentfilesbutton
sys.modules['lineeditwithclear'] = lineeditwithclear
| gpl-2.0 |
pwndbg/pwndbg | pwndbg/commands/next.py | 2 | 3572 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Stepping until an event occurs
"""
import argparse
import gdb
import pwndbg.commands
import pwndbg.next
@pwndbg.commands.ArgparsedCommand("Breaks at the next jump instruction.", aliases=["nextjump"])
@pwndbg.commands.OnlyWhenRunning
def nextjmp():
"""Breaks at the next jump instruction"""
if pwndbg.next.break_next_branch():
pwndbg.commands.context.context()
parser = argparse.ArgumentParser(description="""Breaks at the next call instruction""")
parser.add_argument("symbol_regex", type=str, default=None, nargs="?", help="A regex matching the name of next symbol to be broken on before calling.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def nextcall(symbol_regex=None):
"""Breaks at the next call instruction"""
if pwndbg.next.break_next_call(symbol_regex):
pwndbg.commands.context.context()
@pwndbg.commands.ArgparsedCommand("""Breaks at next return-like instruction""")
@pwndbg.commands.OnlyWhenRunning
def nextret():
"""Breaks at next return-like instruction"""
if pwndbg.next.break_next_ret():
pwndbg.commands.context.context()
@pwndbg.commands.ArgparsedCommand("""Breaks at next return-like instruction by 'stepping' to it""")
@pwndbg.commands.OnlyWhenRunning
def stepret():
"""Breaks at next return-like instruction by 'stepping' to it"""
while pwndbg.proc.alive and not pwndbg.next.break_next_ret() and pwndbg.next.break_next_branch():
# Here we are e.g. on a CALL instruction (temporarily breakpointed by `break_next_branch`)
# We need to step so that we take this branch instead of ignoring it
gdb.execute('si')
continue
if pwndbg.proc.alive:
pwndbg.commands.context.context()
@pwndbg.commands.ArgparsedCommand("""Breaks at the next instruction that belongs to the running program""")
@pwndbg.commands.OnlyWhenRunning
def nextproginstr():
"""Breaks at the next instruction that belongs to the running program"""
if pwndbg.next.break_on_program_code():
pwndbg.commands.context.context()
parser = argparse.ArgumentParser(description="""Sets a breakpoint on the instruction after this one""")
parser.add_argument("addr", type=int, default=None, nargs="?", help="The address to break after.")
@pwndbg.commands.ArgparsedCommand(parser, aliases=["so"])
@pwndbg.commands.OnlyWhenRunning
def stepover(addr=None):
"""Sets a breakpoint on the instruction after this one"""
pwndbg.next.break_on_next(addr)
@pwndbg.commands.ArgparsedCommand("Breaks at the next syscall not taking branches.",aliases=["nextsc"])
@pwndbg.commands.OnlyWhenRunning
def nextsyscall():
"""
Breaks at the next syscall not taking branches.
"""
while pwndbg.proc.alive and not pwndbg.next.break_next_interrupt() and pwndbg.next.break_next_branch():
continue
if pwndbg.proc.alive:
pwndbg.commands.context.context()
@pwndbg.commands.ArgparsedCommand("Breaks at the next syscall by taking branches.",aliases=["stepsc"])
@pwndbg.commands.OnlyWhenRunning
def stepsyscall():
"""
Breaks at the next syscall by taking branches.
"""
while pwndbg.proc.alive and not pwndbg.next.break_next_interrupt() and pwndbg.next.break_next_branch():
# Here we are e.g. on a CALL instruction (temporarily breakpointed by `break_next_branch`)
# We need to step so that we take this branch instead of ignoring it
gdb.execute('si')
continue
if pwndbg.proc.alive:
pwndbg.commands.context.context()
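# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of pwndbg): one more command following
# the same pattern as those above -- break at the next branch, step over it
# once, then refresh the context. It reuses only helpers already referenced
# in this module; the command name itself is hypothetical.
@pwndbg.commands.ArgparsedCommand("Example sketch: break at the next branch and step over it.")
@pwndbg.commands.OnlyWhenRunning
def examplebranchstep():
    """Example sketch: break at the next branch and step over it"""
    if pwndbg.next.break_next_branch():
        gdb.execute('si')
        pwndbg.commands.context.context()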
| mit |
cirocosta/hpcos | lib/gtest-1.6.0/test/gtest_list_tests_unittest.py | 1068 | 5415 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = '[email protected] (Patrick Hanna)'
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
Abc.
Xyz
Def
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output: the expected output after running command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
msg = ('when %s is %s, the output of "%s" is "%s".' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))
if expected_output is not None:
self.assert_(output == expected_output, msg)
else:
self.assert_(output != EXPECTED_OUTPUT_NO_FILTER, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_NO_FILTER,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_NO_FILTER,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_FILTER_FOO,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-3.0 |
VagrantApe/flaskMicroblog | venv/lib/python2.7/site-packages/openid/yadis/manager.py | 167 | 6081 | class YadisServiceManager(object):
"""Holds the state of a list of selected Yadis services, managing
storing it in a session and iterating over the services in order."""
def __init__(self, starting_url, yadis_url, services, session_key):
# The URL that was used to initiate the Yadis protocol
self.starting_url = starting_url
# The URL after following redirects (the identifier)
self.yadis_url = yadis_url
# List of service elements
self.services = list(services)
self.session_key = session_key
# Reference to the current service object
self._current = None
def __len__(self):
"""How many untried services remain?"""
return len(self.services)
def __iter__(self):
return self
def next(self):
"""Return the next service
self.current() will continue to return that service until the
next call to this method."""
try:
self._current = self.services.pop(0)
except IndexError:
raise StopIteration
else:
return self._current
def current(self):
"""Return the current service.
Returns None if there are no services left.
"""
return self._current
def forURL(self, url):
return url in [self.starting_url, self.yadis_url]
def started(self):
"""Has the first service been returned?"""
return self._current is not None
def store(self, session):
"""Store this object in the session, by its session key."""
session[self.session_key] = self
class Discovery(object):
"""State management for discovery.
High-level usage pattern is to call .getNextService(discover) in
order to find the next available service for this user for this
session. Once a request completes, call .finish() to clean up the
session state.
@ivar session: a dict-like object that stores state unique to the
requesting user-agent. This object must be able to store
serializable objects.
@ivar url: the URL that is used to make the discovery request
@ivar session_key_suffix: The suffix that will be used to identify
this object in the session object.
"""
DEFAULT_SUFFIX = 'auth'
PREFIX = '_yadis_services_'
def __init__(self, session, url, session_key_suffix=None):
"""Initialize a discovery object"""
self.session = session
self.url = url
if session_key_suffix is None:
session_key_suffix = self.DEFAULT_SUFFIX
self.session_key_suffix = session_key_suffix
def getNextService(self, discover):
"""Return the next authentication service for the pair of
user_input and session. This function handles fallback.
@param discover: a callable that takes a URL and returns a
list of services
@type discover: str -> [service]
@return: the next available service
"""
manager = self.getManager()
if manager is not None and not manager:
self.destroyManager()
if not manager:
yadis_url, services = discover(self.url)
manager = self.createManager(services, yadis_url)
if manager:
service = manager.next()
manager.store(self.session)
else:
service = None
return service
def cleanup(self, force=False):
"""Clean up Yadis-related services in the session and return
the most-recently-attempted service from the manager, if one
exists.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
@return: current service endpoint object or None if there is
no current service
"""
manager = self.getManager(force=force)
if manager is not None:
service = manager.current()
self.destroyManager(force=force)
else:
service = None
return service
### Lower-level methods
def getSessionKey(self):
"""Get the session key for this starting URL and suffix
@return: The session key
@rtype: str
"""
return self.PREFIX + self.session_key_suffix
def getManager(self, force=False):
"""Extract the YadisServiceManager for this object's URL and
suffix from the session.
@param force: True if the manager should be returned
regardless of whether it's a manager for self.url.
@return: The current YadisServiceManager, if it's for this
URL, or else None
"""
manager = self.session.get(self.getSessionKey())
if (manager is not None and (manager.forURL(self.url) or force)):
return manager
else:
return None
def createManager(self, services, yadis_url=None):
"""Create a new YadisService Manager for this starting URL and
suffix, and store it in the session.
@raises KeyError: When I already have a manager.
@return: A new YadisServiceManager or None
"""
key = self.getSessionKey()
if self.getManager():
raise KeyError('There is already a %r manager for %r' %
(key, self.url))
if not services:
return None
manager = YadisServiceManager(self.url, yadis_url, services, key)
manager.store(self.session)
return manager
def destroyManager(self, force=False):
"""Delete any YadisServiceManager with this starting URL and
suffix from the session.
If there is no service manager or the service manager is for a
different URL, it silently does nothing.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
"""
if self.getManager(force=force) is not None:
key = self.getSessionKey()
del self.session[key]
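# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the python-openid
# package): drive Discovery with a plain dict as the session object and a
# stub discover() callable. A real discover() would fetch and parse the
# Yadis document behind the URL and return (yadis_url, list_of_services).
if __name__ == '__main__':
    def _stub_discover(url):
        return url, ['service-one', 'service-two']

    _session = {}
    _disco = Discovery(_session, 'http://example.invalid/')
    _first = _disco.getNextService(_stub_discover)   # -> 'service-one'
    print 'selected service:', _first
    print 'cleaned up service:', _disco.cleanup()    # same service; manager removed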
| bsd-3-clause |
timothyparez/PyBitmessage | src/helper_msgcoding.py | 2 | 2961 | #!/usr/bin/python2.7
import msgpack
import zlib
from debug import logger
BITMESSAGE_ENCODING_IGNORE = 0
BITMESSAGE_ENCODING_TRIVIAL = 1
BITMESSAGE_ENCODING_SIMPLE = 2
BITMESSAGE_ENCODING_EXTENDED = 3
class MsgEncode(object):
def __init__(self, message, encoding = BITMESSAGE_ENCODING_SIMPLE):
self.data = None
self.encoding = encoding
self.length = 0
if self.encoding == BITMESSAGE_ENCODING_EXTENDED:
self.encodeExtended(message)
elif self.encoding == BITMESSAGE_ENCODING_SIMPLE:
self.encodeSimple(message)
elif self.encoding == BITMESSAGE_ENCODING_TRIVIAL:
self.encodeTrivial(message)
def encodeExtended(self, message):
try:
            self.data = zlib.compress(msgpack.dumps({"": "message", "subject": message['subject'], "body": message['body']}), 9)
except zlib.error:
logger.error ("Error compressing message")
raise
except msgpack.exceptions.PackException:
logger.error ("Error msgpacking message")
raise
self.length = len(self.data)
def encodeSimple(self, message):
self.data = 'Subject:' + message['subject'] + '\n' + 'Body:' + message['body']
self.length = len(self.data)
def encodeTrivial(self, message):
self.data = message['body']
self.length = len(self.data)
class MsgDecode(object):
def __init__(self, encoding, data):
self.encoding = encoding
if self.encoding == BITMESSAGE_ENCODING_EXTENDED:
self.decodeExtended(data)
elif self.encoding in [BITMESSAGE_ENCODING_SIMPLE, BITMESSAGE_ENCODING_TRIVIAL]:
self.decodeSimple(data)
return
def decodeExtended(self, data):
try:
tmp = msgpack.loads(zlib.decompress(data))
except zlib.error:
logger.error ("Error decompressing message")
raise
except (msgpack.exceptions.UnpackException,
msgpack.exceptions.ExtraData):
logger.error ("Error msgunpacking message")
raise
try:
if tmp[""] == "message":
self.body = tmp["body"]
self.subject = tmp["subject"]
except:
logger.error ("Malformed message")
raise
    def decodeSimple(self, data):
        bodyPositionIndex = data.find('\nBody:')
        if bodyPositionIndex > 1:
            subject = data[8:bodyPositionIndex]
            # Only save and show the first 500 characters of the subject.
            # Any more is probably an attack.
            subject = subject[:500]
            body = data[bodyPositionIndex + 6:]
        else:
            subject = ''
            body = data
        # Throw away any extra lines (headers) after the subject.
        if subject:
            subject = subject.splitlines()[0]
        self.subject = subject
        self.body = body
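# ---------------------------------------------------------------------------
# Minimal round-trip sketch (illustrative only, not part of PyBitmessage):
# encode a message with the extended (msgpack + zlib) encoding and decode it
# back. The subject and body values are placeholders.
if __name__ == '__main__':
    _demo = {'subject': 'Demo subject', 'body': 'Demo body'}
    _encoded = MsgEncode(_demo, BITMESSAGE_ENCODING_EXTENDED)
    _decoded = MsgDecode(BITMESSAGE_ENCODING_EXTENDED, _encoded.data)
    print _decoded.subject, '/', _decoded.body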
| mit |
jerryjobs/thirdpartPushSystem | push/getui/igetui/igt_message.py | 1 | 2672 | __author__ = 'wei'
#from igetui.template.igt_base_template import *
#from igetui.utils.AppConditions import *
from .template.igt_base_template import *
from .utils.AppConditions import *
class IGtMessage:
def __init__(self):
self.isOffline = False
self.offlineExpireTime = 0
self.data = BaseTemplate()
self.pushNetWorkType = 0
self.priority=0
def isOffline(self):
return self.isOffline
def setOffline(self,isOffline):
self.isOffline=isOffline
def getOfflineExpireTime(self):
return self.offlineExpireTime
def setOfflineExpireTime(self,offlineExpireTime):
self.offlineExpireTime=offlineExpireTime
def getData(self):
return self.data
def setData(self,data):
self.data=data
def getPriority(self):
return self.priority
def setPriority(self,priority):
self.priority=priority
def getPushNetWorkType(self):
return self.pushNetWorkType
def setPushNetWorkType(self,pushNetWorkType):
self.pushNetWorkType=pushNetWorkType
class IGtSingleMessage(IGtMessage) :
def __init__(self):
IGtMessage.__init__(self)
class IGtListMessage(IGtMessage):
def __init__(self):
IGtMessage.__init__(self)
class IGtAppMessage(IGtMessage):
def __init__(self):
IGtMessage.__init__(self)
self.appIdList = []
self.phoneTypeList = []
self.provinceList = []
self.tagList = []
self.conditions = None
self.speed = 0
def getTagList(self):
return self.tagList
def setTagList(self,tagList):
self.tagList=tagList
def getAppIdList(self):
return self.appIdList
    def setAppIdList(self, appIdList):
        self.appIdList = appIdList
def getPhoneTypeList(self):
return self.phoneTypeList
def setPhoneTypeList(self,phoneTypeList):
self.phoneTypeList=phoneTypeList
def getProvinceList(self):
return self.provinceList
def setProvinceList(self,provinceList):
self.provinceList=provinceList
def getConditions(self):
return self.conditions;
def setConditions(self, conditions):
self.conditions = conditions
def getSpeed(self):
return self.speed
def setSpeed(self,speed):
self.speed=speed
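# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the Getui SDK): build
# an app-wide message limited to two hypothetical appIds with a speed limit.
# All values below are placeholders.
def _example_app_message():
    msg = IGtAppMessage()
    msg.setOffline(True)
    msg.setOfflineExpireTime(3600 * 1000)
    msg.setAppIdList(['appid-demo-1', 'appid-demo-2'])
    msg.setSpeed(100)
    return msg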
| apache-2.0 |
matthaywardwebdesign/rethinkdb | test/interface/shard_fuzzer.py | 24 | 8023 | #!/usr/bin/env python
# Copyright 2014 RethinkDB, all rights reserved.
'''This test randomly rebalances tables and shards to probabilistically find bugs in the system.'''
from __future__ import print_function
import pprint, os, sys, time, random, threading
startTime = time.time()
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse
opts = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(opts)
opts['random-seed'] = vcoptparse.FloatFlag('--random-seed', random.random())
opts['num-tables'] = vcoptparse.IntFlag('--num-tables', 6) # Number of tables to create
opts['table-scale'] = vcoptparse.IntFlag('--table-scale', 7) # Factor of increasing table size
opts['duration'] = vcoptparse.IntFlag('--duration', 120) # Time to perform fuzzing in seconds
opts['ignore-timeouts'] = vcoptparse.BoolFlag('--ignore-timeouts') # Ignore table_wait timeouts and continue
opts['progress'] = vcoptparse.BoolFlag('--progress') # Write messages every 10 seconds with the time remaining
parsed_opts = opts.parse(sys.argv)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(parsed_opts)
r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()
server_names = [ 'War', 'Famine', 'Pestilence', 'Death' ]
tables = [ chr(ord('a') + i) for i in xrange(parsed_opts['num-tables']) ]
table_counts = [ pow(parsed_opts['table-scale'], i) for i in xrange(len(tables)) ]
db = 'test'
# The table_history contains a history of every fuzzed command run against the table
table_history = {}
for table_name in tables:
table_history[table_name] = []
def populate_table(conn, table, count):
print("Populating table '%s' with %d rows (%.2fs)" % (table, count, time.time() - startTime))
r.db(db).table(table).insert(r.range(count).map(lambda x: {'id': x})).run(conn)
def create_tables(conn):
assert len(tables) == len(table_counts)
if not dbName in r.db_list().run(conn):
r.db_create(dbName).run(conn)
print("Creating %d tables (%.2fs)" % (len(tables), time.time() - startTime))
for i in xrange(len(tables)):
r.db(db).table_create(tables[i]).run(conn)
populate_table(conn, tables[i], table_counts[i])
def generate_fuzz():
shards = []
for i in xrange(random.randint(1, 16)):
shards.append({'replicas': random.sample(server_names, random.randint(1, len(server_names)))})
shards[-1]['primary_replica'] = random.choice(shards[-1]['replicas'])
return {'shards': shards}
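# For illustration, a single call to generate_fuzz() might return a config
# document such as (server names drawn from `server_names` above):
#   {'shards': [{'replicas': ['War', 'Death'], 'primary_replica': 'Death'},
#               {'replicas': ['Famine'], 'primary_replica': 'Famine'}]}
# This is the document the fuzzer below writes into the table's config().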
def fuzz_table(cluster, table, stop_event, random_seed):
random.seed(random_seed)
fuzz_attempts = 0
fuzz_successes = 0
fuzz_non_trivial_failures = 0
# All the query-related functions used in the fuzzer loop
def do_query(fn, c):
query = fn(r.db(db).table(table))
table_history[table].append(query)
return query.run(c)
def table_wait(q):
return q.wait(wait_for='all_replicas_ready', timeout=30)
def table_reconfigure(q):
return q.reconfigure(shards=random.randint(1, 16), replicas=random.randint(1, len(server_names)))
def table_rebalance(q):
return q.rebalance()
def table_config_update(q):
return q.config().update(generate_fuzz())
while not stop_event.is_set():
try:
server = random.choice(list(cluster.processes))
conn = r.connect(server.host, server.driver_port)
except:
print("Failed to connect to a server - something probably broke, stopping fuzz")
stop_event.set()
continue
try:
fuzz_attempts += 1
if random.random() > 0.2: # With an 80% probability, wait for the table before fuzzing
do_query(table_wait, conn)
rand_res = random.random()
if rand_res > 0.9: # With a 10% probability, do a basic `reconfigure`
do_query(table_reconfigure, conn)
elif rand_res > 0.8: # With a 10% probability, use `rebalance`
do_query(table_rebalance, conn)
else: # With an 80% probability, use a fuzzed configuration
do_query(table_config_update, conn)
fuzz_successes += 1
except Exception as ex:
# Ignore some errors that are natural consequences of the test
if "isn't enough data in the table" in str(ex) or \
"the table isn't currently available for reading" in str(ex) or \
(parsed_opts['ignore-timeouts'] and "Timed out while waiting for tables" in str(ex)):
pass
else:
fuzz_non_trivial_failures += 1
print("Fuzz of table '%s' failed (%.2fs): %s" % (table, time.time() - startTime, str(ex)))
try:
(config, status) = r.expr([r.db(db).table(table).config(), r.db(db).table(table).status()]).run(conn)
print("Table '%s' config:\n%s" % (table, pprint.pformat(config)))
print("Table '%s' status:\n%s" % (table, pprint.pformat(status)))
except Exception as ex:
print("Could not get config or status for table '%s': %s" % (table, str(ex)))
print("Table '%s' history:\n%s" % (table, pprint.pformat(table_history[table])))
print("Stopped fuzzing on table '%s', attempts: %d, successes: %d, non-trivial failures: %d (%.2fs)" %
(table, fuzz_attempts, fuzz_successes, fuzz_non_trivial_failures, time.time() - startTime))
sys.stdout.flush()
print("Spinning up %d servers (%.2fs)" % (len(server_names), time.time() - startTime))
with driver.Cluster(initial_servers=server_names, output_folder='.', command_prefix=command_prefix,
extra_options=serve_options, wait_until_ready=True) as cluster:
cluster.check()
print("table counts: %s" % table_counts)
print("Server driver ports: %s" % (str([x.driver_port for x in cluster])))
print("Establishing ReQL connection (%.2fs)" % (time.time() - startTime))
conn = r.connect(host=cluster[0].host, port=cluster[0].driver_port)
create_tables(conn)
random.seed(parsed_opts['random-seed'])
print("Fuzzing shards for %ds, random seed: %s (%.2fs)" %
(parsed_opts['duration'], repr(parsed_opts['random-seed']), time.time() - startTime))
stop_event = threading.Event()
table_threads = []
for table in tables:
table_threads.append(threading.Thread(target=fuzz_table, args=(cluster, table, stop_event, random.random())))
table_threads[-1].start()
last_time = time.time()
end_time = last_time + parsed_opts['duration']
while (time.time() < end_time) and not stop_event.is_set():
# TODO: random disconnections / kills during fuzzing
time.sleep(0.2)
current_time = time.time()
if parsed_opts['progress'] and int((end_time - current_time) / 10) < int((end_time - last_time) / 10):
print("%ds remaining (%.2fs)" % (int(end_time - current_time) + 1, time.time() - startTime))
last_time = current_time
if not all([x.is_alive() for x in table_threads]):
stop_event.set()
print("Stopping fuzzing (%d of %d threads remain) (%.2fs)" % (len(table_threads), len(tables), time.time() - startTime))
stop_event.set()
for thread in table_threads:
thread.join()
for i in xrange(len(tables)):
print("Checking contents of table '%s' (%.2fs)" % (tables[i], time.time() - startTime))
# TODO: check row data itself
r.db(db).table(tables[i]).wait().run(conn)
count = r.db(db).table(tables[i]).count().run(conn)
assert count == table_counts[i], "Incorrect table count following fuzz of table '%s', found %d of expected %d" % (tables[i], count, table_counts[i])
print("Cleaning up (%.2fs)" % (time.time() - startTime))
print("Done. (%.2fs)" % (time.time() - startTime))
| agpl-3.0 |
axinging/chromium-crosswalk | tools/perf/PRESUBMIT.py | 6 | 5076 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting tools/perf/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import os
import re
import sys
def _CommonChecks(input_api, output_api):
"""Performs common checks, which includes running pylint."""
results = []
results.extend(_CheckWprShaFiles(input_api, output_api))
results.extend(_CheckJson(input_api, output_api))
results.extend(input_api.RunTests(input_api.canned_checks.GetPylint(
input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
pylintrc='pylintrc')))
return results
def _GetPathsToPrepend(input_api):
perf_dir = input_api.PresubmitLocalPath()
chromium_src_dir = input_api.os_path.join(perf_dir, '..', '..')
telemetry_dir = input_api.os_path.join(
chromium_src_dir, 'third_party', 'catapult', 'telemetry')
experimental_dir = input_api.os_path.join(
chromium_src_dir, 'third_party', 'catapult', 'experimental')
return [
telemetry_dir,
input_api.os_path.join(telemetry_dir, 'third_party', 'mock'),
experimental_dir,
]
def _CheckWprShaFiles(input_api, output_api):
"""Check whether the wpr sha files have matching URLs."""
old_sys_path = sys.path
try:
perf_dir = input_api.PresubmitLocalPath()
catapult_path = os.path.abspath(os.path.join(
perf_dir, '..', '..', 'third_party', 'catapult', 'catapult_base'))
sys.path.insert(1, catapult_path)
from catapult_base import cloud_storage # pylint: disable=import-error
finally:
sys.path = old_sys_path
results = []
for affected_file in input_api.AffectedFiles(include_deletes=False):
filename = affected_file.AbsoluteLocalPath()
if not filename.endswith('wpr.sha1'):
continue
expected_hash = cloud_storage.ReadHash(filename)
is_wpr_file_uploaded = any(
cloud_storage.Exists(bucket, expected_hash)
for bucket in cloud_storage.BUCKET_ALIASES.itervalues())
if not is_wpr_file_uploaded:
wpr_filename = filename[:-5]
results.append(output_api.PresubmitError(
'The file matching %s is not in Cloud Storage yet.\n'
'You can upload your new WPR archive file with the command:\n'
'depot_tools/upload_to_google_storage.py --bucket '
'<Your pageset\'s bucket> %s.\nFor more info: see '
'http://www.chromium.org/developers/telemetry/'
'record_a_page_set#TOC-Upload-the-recording-to-Cloud-Storage' %
(filename, wpr_filename)))
return results
def _CheckJson(input_api, output_api):
"""Checks whether JSON files in this change can be parsed."""
for affected_file in input_api.AffectedFiles(include_deletes=False):
filename = affected_file.AbsoluteLocalPath()
if os.path.splitext(filename)[1] != '.json':
continue
try:
input_api.json.load(open(filename))
except ValueError:
return [output_api.PresubmitError('Error parsing JSON in %s!' % filename)]
return []
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
def _AreBenchmarksModified(change):
"""Checks whether CL contains any modification to Telemetry benchmarks."""
for affected_file in change.AffectedFiles():
affected_file_path = affected_file.LocalPath()
file_path, _ = os.path.splitext(affected_file_path)
if (os.path.join('tools', 'perf', 'benchmarks') in file_path or
os.path.join('tools', 'perf', 'measurements') in file_path):
return True
return False
def PostUploadHook(cl, change, output_api):
"""git cl upload will call this hook after the issue is created/modified.
This hook adds extra try bots list to the CL description in order to run
Telemetry benchmarks on Perf trybots in addition to CQ trybots if the CL
contains any changes to Telemetry benchmarks.
"""
benchmarks_modified = _AreBenchmarksModified(change)
rietveld_obj = cl.RpcServer()
issue = cl.issue
original_description = rietveld_obj.get_description(issue)
if not benchmarks_modified or re.search(
r'^CQ_EXTRA_TRYBOTS=.*', original_description, re.M | re.I):
return []
results = []
bots = [
'android_s5_perf_cq',
'winx64_10_perf_cq',
'mac_retina_perf_cq',
'linux_perf_cq'
]
bots = ['tryserver.chromium.perf:%s' % s for s in bots]
bots_string = ';'.join(bots)
description = original_description
description += '\nCQ_EXTRA_TRYBOTS=%s' % bots_string
results.append(output_api.PresubmitNotifyResult(
'Automatically added Perf trybots to run Telemetry benchmarks on CQ.'))
if description != original_description:
rietveld_obj.update_description(issue, description)
return results
| bsd-3-clause |
Webee-IOT/webee210-linux-kernel-3.8 | Documentation/target/tcm_mod_builder.py | 2358 | 40707 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| gpl-2.0 |
dmnfarrell/peat | DataPipeline/Custom.py | 1 | 3731 | #!/usr/bin/env python
#
# DataPipeline - A data import and fitting tool
# Copyright (C) 2011 Damien Farrell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: damien.farrell_at_ucd.ie
# Normal mail:
# Damien Farrell
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
import os, sys, math, string, types
import inspect
from datetime import datetime
import ConfigParser, csv
from Importer import BaseImporter
"""Custom Importers should be added here. These will usually sub-classes of
BaseImporter but can also inherit from any of the importers in Base.py
Users should must add an entry to the dictionary below so that the class can
be identified from the format keyword in the config file"""
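# The registration dictionary itself is not part of this excerpt; as a purely
# hypothetical sketch, it could map the format keyword to the importer class, e.g.:
#
# custom_importers = {KineticsDataImporter.name: KineticsDataImporter}
#
# so a config file declaring format=kineticsdata would resolve to the class below.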
class KineticsDataImporter(BaseImporter):
"""This is a custom importer to handle the kinetics data supplied as part of the
case study. This data are kinetic assays measured over time intervals per substrate
concentration. The data are grouped in rows per time point, each row is represents a
specific concentration. Each column is one variant.
The importer returns a nested dictionary with variants as keys"""
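# Illustrative shape of the returned structure (variant and concentration labels are
# invented): doImport() fills data[variant][concentration] = [x_values, y_values], e.g.
#
# {'mutant1': {'0.5': [[0, 30, 60], [0.02, 0.11, 0.25]],
# '1.0': [[0, 30, 60], [0.05, 0.19, 0.41]]},
# 'mutant2': {...}}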
name = 'kineticsdata' #use as format keyword in conf file
def __init__(self, cp):
BaseImporter.__init__(self, cp)
return
def doImport(self, lines):
"""Common x values for every substrate concentration"""
data = {}
#assumes the column header has labels
names = self.colheaderlabels.split(',')
rowlabels = self.rowheaderlabels.split(',')
if self.rowend == 0:
self.rowend=len(lines)-12
if self.colend == 0:
self.colend = len(names)
rowstep = self.rowrepeat
for col in range(self.colstart, self.colend):
name = names[col]
if not data.has_key(name):
data[name] = {}
#print name, col
for d in range(0, rowstep):
xdata=[]; ydata=[]
if d < len(rowlabels):
label = rowlabels[d]
else:
label = d
for row in range(self.rowstart, self.rowend, rowstep):
xval = self.getRow(lines, row)[0]
rowdata = self.getRow(lines, row+d)
if len(rowdata) <= col: continue
xdata.append(xval)
if d==0: ind=col+2
else: ind = col
ydata.append(rowdata[ind])
if len(xdata)<=1: continue
x,y = self.getXYValues(xdata,ydata,xformat=self.xformat)
if self.xformat != '':
x = self.convertTimeValues(x)
if self.yformat != '':
y = self.convertTimeValues(y)
if x is None or y is None:
continue
#print x,y
if not data[name].has_key(label):
data[name][label]=[x,y]
return data
def postProcess(self):
"""Post process raw data"""
return
| mit |
Ryanglambert/pybrain | pybrain/rl/agents/logging.py | 31 | 2380 | __author__ = 'Thomas Rueckstiess, [email protected]'
from pybrain.rl.agents.agent import Agent
from pybrain.datasets import ReinforcementDataSet
class LoggingAgent(Agent):
""" This agent stores actions, states, and rewards encountered during
interaction with an environment in a ReinforcementDataSet (which is
a variation of SequentialDataSet).
The stored history can be used for learning and is erased by resetting
the agent. It also makes sure that integrateObservation, getAction and
giveReward are called in exactly that order.
"""
logging = True
lastobs = None
lastaction = None
lastreward = None
def __init__(self, indim, outdim, **kwargs):
self.setArgs(**kwargs)
# store input and output dimension
self.indim = indim
self.outdim = outdim
# create the history dataset
self.history = ReinforcementDataSet(indim, outdim)
def integrateObservation(self, obs):
"""Step 1: store the observation received in a temporary variable until action is called and
reward is given. """
self.lastobs = obs
self.lastaction = None
self.lastreward = None
def getAction(self):
"""Step 2: store the action in a temporary variable until reward is given. """
assert self.lastobs != None
assert self.lastaction == None
assert self.lastreward == None
# implement getAction in subclass and set self.lastaction
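# Hedged sketch of what such an override might look like in a subclass that owns a
# neural-network `module` attribute (that attribute name is an assumption, not part
# of this class):
#
# def getAction(self):
# LoggingAgent.getAction(self) # run the assertions above
# self.lastaction = self.module.activate(self.lastobs)
# return self.lastaction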
def giveReward(self, r):
"""Step 3: store observation, action and reward in the history dataset. """
# step 3: assume that state and action have been set
assert self.lastobs != None
assert self.lastaction != None
assert self.lastreward == None
self.lastreward = r
# store state, action and reward in dataset if logging is enabled
if self.logging:
self.history.addSample(self.lastobs, self.lastaction, self.lastreward)
def newEpisode(self):
""" Indicate the beginning of a new episode in the training cycle. """
if self.logging:
self.history.newSequence()
def reset(self):
""" Clear the history of the agent. """
self.lastobs = None
self.lastaction = None
self.lastreward = None
self.history.clear()
| bsd-3-clause |
JVillella/tensorflow | tensorflow/contrib/tpu/python/tpu/tpu_function_test.py | 75 | 5272 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tpu_function helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.python.platform import test
class FunctionArgCheckTest(test.TestCase):
def testSimple(self):
"""Tests that arg checker works for functions with no varargs or defaults.
"""
def func(x, y, z):
return x + y + z
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 3, None))
self.assertEqual("exactly 3 arguments",
tpu_function.check_function_argument_count(func, 2, None))
queue = tpu_feed.InfeedQueue(2)
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 1, queue))
self.assertEqual("exactly 3 arguments",
tpu_function.check_function_argument_count(func, 2, queue))
def testDefaultArgs(self):
"""Tests that arg checker works for a function with no varargs."""
def func(x, y, z=17):
return x + y + z
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 3, None))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 2, None))
self.assertEqual("at least 2 arguments",
tpu_function.check_function_argument_count(func, 1, None))
self.assertEqual("at most 3 arguments",
tpu_function.check_function_argument_count(func, 4, None))
queue = tpu_feed.InfeedQueue(1)
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 2, queue))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 1, queue))
self.assertEqual("at least 2 arguments",
tpu_function.check_function_argument_count(func, 0, queue))
self.assertEqual("at most 3 arguments",
tpu_function.check_function_argument_count(func, 4, queue))
def testVarArgs(self):
"""Tests that arg checker works for a function with varargs."""
def func(x, y, *z):
return x + y + len(z)
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 2, None))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 3, None))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 4, None))
self.assertEqual("at least 2 arguments",
tpu_function.check_function_argument_count(func, 1, None))
queue = tpu_feed.InfeedQueue(1)
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 1, queue))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 2, queue))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 3, queue))
self.assertEqual("at least 2 arguments",
tpu_function.check_function_argument_count(func, 0, queue))
def testVarArgsAndDefaults(self):
"""Tests that arg checker works for a function with varargs and defaults."""
def func(x, y, z=17, *q):
return x + y + z + len(q)
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 2, None))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 3, None))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 4, None))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 5, None))
self.assertEqual("at least 2 arguments",
tpu_function.check_function_argument_count(func, 1, None))
queue = tpu_feed.InfeedQueue(1)
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 1, queue))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 2, queue))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 3, queue))
self.assertEqual(None,
tpu_function.check_function_argument_count(func, 4, queue))
self.assertEqual("at least 2 arguments",
tpu_function.check_function_argument_count(func, 0, queue))
if __name__ == "__main__":
test.main()
| apache-2.0 |
darjeeling/django | django/db/backends/postgresql/introspection.py | 16 | 10779 | import warnings
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.db.models.indexes import Index
from django.utils.deprecation import RemovedInDjango21Warning
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
17: 'BinaryField',
20: 'BigIntegerField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'GenericIPAddressField',
1042: 'CharField', # blank-padded
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
2950: 'UUIDField',
}
ignored_tables = []
_get_indexes_query = """
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s"""
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if description.default and 'nextval' in description.default:
if field_type == 'IntegerField':
return 'AutoField'
elif field_type == 'BigIntegerField':
return 'BigAutoField'
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
for row in cursor.fetchall()
if row[0] not in self.ignored_tables]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
# As cursor.description does not reliably return the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = %s""", [table_name])
field_map = {line[0]: line[1:] for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [
FieldInfo(*(line[0:6] + (field_map[line.name][0] == 'YES', field_map[line.name][1])))
for line in cursor.description
]
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
cursor.execute("""
SELECT c2.relname, a1.attname, a2.attname
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[1]] = (row[2], row[0])
return relations
def get_key_columns(self, cursor, table_name):
key_columns = []
cursor.execute("""
SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
FROM information_schema.constraint_column_usage ccu
LEFT JOIN information_schema.key_column_usage kcu
ON ccu.constraint_catalog = kcu.constraint_catalog
AND ccu.constraint_schema = kcu.constraint_schema
AND ccu.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.table_constraints tc
ON ccu.constraint_catalog = tc.constraint_catalog
AND ccu.constraint_schema = tc.constraint_schema
AND ccu.constraint_name = tc.constraint_name
WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
warnings.warn(
"get_indexes() is deprecated in favor of get_constraints().",
RemovedInDjango21Warning, stacklevel=2
)
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute(self._get_indexes_query, [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
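# For example (illustrative values): idx.indkey == '3' is a single-column index on
# attnum 3 and is processed, while idx.indkey == '1 3' spans two columns and is skipped.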
if ' ' in row[1]:
continue
if row[0] not in indexes:
indexes[row[0]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[3]:
indexes[row[0]]['primary_key'] = True
if row[2]:
indexes[row[0]]['unique'] = True
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns. Also retrieve the definition of expression-based
indexes.
"""
constraints = {}
# Loop over the key table, collecting things as constraints. The column
# array must return column names in the same order in which they were
# created.
# The subquery containing generate_series can be replaced with
# "WITH ORDINALITY" when support for PostgreSQL 9.3 is dropped.
cursor.execute("""
SELECT
c.conname,
array(
SELECT attname
FROM (
SELECT unnest(c.conkey) AS colid,
generate_series(1, array_length(c.conkey, 1)) AS arridx
) AS cols
JOIN pg_attribute AS ca ON cols.colid = ca.attnum
WHERE ca.attrelid = c.conrelid
ORDER BY cols.arridx
),
c.contype,
(SELECT fkc.relname || '.' || fka.attname
FROM pg_attribute AS fka
JOIN pg_class AS fkc ON fka.attrelid = fkc.oid
WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]),
cl.reloptions
FROM pg_constraint AS c
JOIN pg_class AS cl ON c.conrelid = cl.oid
JOIN pg_namespace AS ns ON cl.relnamespace = ns.oid
WHERE ns.nspname = %s AND cl.relname = %s
""", ["public", table_name])
for constraint, columns, kind, used_cols, options in cursor.fetchall():
constraints[constraint] = {
"columns": columns,
"primary_key": kind == "p",
"unique": kind in ["p", "u"],
"foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None,
"check": kind == "c",
"index": False,
"definition": None,
"options": options,
}
# Now get indexes
# The row_number() function for ordering the index fields can be
# replaced by WITH ORDINALITY in the unnest() functions when support
# for PostgreSQL 9.3 is dropped.
cursor.execute("""
SELECT
indexname, array_agg(attname ORDER BY rnum), indisunique, indisprimary,
array_agg(ordering ORDER BY rnum), amname, exprdef, s2.attoptions
FROM (
SELECT
row_number() OVER () as rnum, c2.relname as indexname,
idx.*, attr.attname, am.amname,
CASE
WHEN idx.indexprs IS NOT NULL THEN
pg_get_indexdef(idx.indexrelid)
END AS exprdef,
CASE am.amname
WHEN 'btree' THEN
CASE (option & 1)
WHEN 1 THEN 'DESC' ELSE 'ASC'
END
END as ordering,
c2.reloptions as attoptions
FROM (
SELECT
*, unnest(i.indkey) as key, unnest(i.indoption) as option
FROM pg_index i
) idx
LEFT JOIN pg_class c ON idx.indrelid = c.oid
LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid
LEFT JOIN pg_am am ON c2.relam = am.oid
LEFT JOIN pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key
WHERE c.relname = %s
) s2
GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions;
""", [table_name])
for index, columns, unique, primary, orders, type_, definition, options in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": columns if columns != [None] else [],
"orders": orders if orders != [None] else [],
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
"type": Index.suffix if type_ == 'btree' else type_,
"definition": definition,
"options": options,
}
return constraints
| bsd-3-clause |
faywong/FFPlayer | project/jni/python/src/Lib/test/test_quopri.py | 58 | 7365 | from test import test_support
import unittest
import sys, cStringIO, subprocess
import quopri
ENCSAMPLE = """\
Here's a bunch of special=20
=A1=A2=A3=A4=A5=A6=A7=A8=A9
=AA=AB=AC=AD=AE=AF=B0=B1=B2=B3
=B4=B5=B6=B7=B8=B9=BA=BB=BC=BD=BE
=BF=C0=C1=C2=C3=C4=C5=C6
=C7=C8=C9=CA=CB=CC=CD=CE=CF
=D0=D1=D2=D3=D4=D5=D6=D7
=D8=D9=DA=DB=DC=DD=DE=DF
=E0=E1=E2=E3=E4=E5=E6=E7
=E8=E9=EA=EB=EC=ED=EE=EF
=F0=F1=F2=F3=F4=F5=F6=F7
=F8=F9=FA=FB=FC=FD=FE=FF
characters... have fun!
"""
# First line ends with a space
DECSAMPLE = "Here's a bunch of special \n" + \
"""\
\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9
\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3
\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe
\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6
\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf
\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7
\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf
\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7
\xe8\xe9\xea\xeb\xec\xed\xee\xef
\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7
\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff
characters... have fun!
"""
def withpythonimplementation(testfunc):
def newtest(self):
# Test default implementation
testfunc(self)
# Test Python implementation
if quopri.b2a_qp is not None or quopri.a2b_qp is not None:
oldencode = quopri.b2a_qp
olddecode = quopri.a2b_qp
try:
quopri.b2a_qp = None
quopri.a2b_qp = None
testfunc(self)
finally:
quopri.b2a_qp = oldencode
quopri.a2b_qp = olddecode
newtest.__name__ = testfunc.__name__
return newtest
class QuopriTestCase(unittest.TestCase):
# Each entry is a tuple of (plaintext, encoded string). These strings are
# used in the "quotetabs=0" tests.
STRINGS = (
# Some normal strings
('hello', 'hello'),
('''hello
there
world''', '''hello
there
world'''),
('''hello
there
world
''', '''hello
there
world
'''),
('\201\202\203', '=81=82=83'),
# Add some trailing MUST QUOTE strings
('hello ', 'hello=20'),
('hello\t', 'hello=09'),
# Some long lines. First, a single line of 108 characters
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\xd8\xd9\xda\xdb\xdc\xdd\xde\xdfxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
'''xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=D8=D9=DA=DB=DC=DD=DE=DFx=
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'''),
# A line of exactly 76 characters, no soft line break should be needed
('yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',
'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'),
# A line of 77 characters, forcing a soft line break at position 75,
# and a second line of exactly 2 characters (because the soft line
# break `=' sign counts against the line length limit).
('zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
'''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
zz'''),
# A line of 151 characters, forcing a soft line break at position 75,
# with a second line of exactly 76 characters and no trailing =
('zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
'''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
# A string containing a hard line break, but which the first line is
# 151 characters and the second line is exactly 76 characters. This
# should leave us with three lines, the first which has a soft line
# break, and which the second and third do not.
('''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz''',
'''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy=
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
# Now some really complex stuff ;)
(DECSAMPLE, ENCSAMPLE),
)
# These are used in the "quotetabs=1" tests.
ESTRINGS = (
('hello world', 'hello=20world'),
('hello\tworld', 'hello=09world'),
)
# These are used in the "header=1" tests.
HSTRINGS = (
('hello world', 'hello_world'),
('hello_world', 'hello=5Fworld'),
)
@withpythonimplementation
def test_encodestring(self):
for p, e in self.STRINGS:
self.assert_(quopri.encodestring(p) == e)
@withpythonimplementation
def test_decodestring(self):
for p, e in self.STRINGS:
self.assert_(quopri.decodestring(e) == p)
@withpythonimplementation
def test_idempotent_string(self):
for p, e in self.STRINGS:
self.assert_(quopri.decodestring(quopri.encodestring(e)) == e)
@withpythonimplementation
def test_encode(self):
for p, e in self.STRINGS:
infp = cStringIO.StringIO(p)
outfp = cStringIO.StringIO()
quopri.encode(infp, outfp, quotetabs=False)
self.assert_(outfp.getvalue() == e)
@withpythonimplementation
def test_decode(self):
for p, e in self.STRINGS:
infp = cStringIO.StringIO(e)
outfp = cStringIO.StringIO()
quopri.decode(infp, outfp)
self.assert_(outfp.getvalue() == p)
@withpythonimplementation
def test_embedded_ws(self):
for p, e in self.ESTRINGS:
self.assert_(quopri.encodestring(p, quotetabs=True) == e)
self.assert_(quopri.decodestring(e) == p)
@withpythonimplementation
def test_encode_header(self):
for p, e in self.HSTRINGS:
self.assert_(quopri.encodestring(p, header=True) == e)
@withpythonimplementation
def test_decode_header(self):
for p, e in self.HSTRINGS:
self.assert_(quopri.decodestring(e, header=True) == p)
def test_scriptencode(self):
(p, e) = self.STRINGS[-1]
process = subprocess.Popen([sys.executable, "-mquopri"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cout, cerr = process.communicate(p)
# On Windows, Python will output the result to stdout using
# CRLF, as the mode of stdout is text mode. To compare this
# with the expected result, we need to do a line-by-line comparison.
self.assertEqual(cout.splitlines(), e.splitlines())
def test_scriptdecode(self):
(p, e) = self.STRINGS[-1]
process = subprocess.Popen([sys.executable, "-mquopri", "-d"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cout, cerr = process.communicate(e)
self.assertEqual(cout.splitlines(), p.splitlines())
def test_main():
test_support.run_unittest(QuopriTestCase)
if __name__ == "__main__":
test_main()
| lgpl-2.1 |
mksachs/UberCC | Uber.py | 2 | 3909 | #!/usr/bin/env python
import datetime
import numpy as np
'''
A subclass of the Exception class to handle bad increment input in the daterange generator
'''
class DateRangeNotSupported(Exception):
def __init__(self, code):
self.code = code
def __str__(self):
return 'Time increment of "%s" not supported. Increment can be \'hours\', \'days\' or \'minutes\'.'%self.code
'''
A generator that iterates over a series of sequential datetimes with the specified increment.
Can handle increments of 'hours', 'days', or 'minutes'.
'''
def daterange(start_date, end_date, increment='hours'):
if increment == 'minutes':
day_multiple = 24.0 * 60.0
elif increment == 'days':
day_multiple = 1.0
elif increment == 'hours':
day_multiple = 24.0
else:
raise DateRangeNotSupported(increment)
for n in np.arange(((end_date - start_date).days + 1) * day_multiple):
yield start_date.replace(hour=0,minute=0,second=0) + datetime.timedelta(**{increment:n})
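# Hedged usage sketch (dates are invented): hourly timestamps over two calendar days.
#
# for ts in daterange(datetime.datetime(2014, 1, 1), datetime.datetime(2014, 1, 2)):
# print ts # 2014-01-01 00:00:00, 2014-01-01 01:00:00, ... (48 hourly values)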
'''
A class that accepts login timestamps and returns forecasts for a given hour and day of the week.
TODO: Error checking.
'''
class DemandPredictor(object):
def __init__(self):
# Login counts per day per hour.
self.login_counts = np.zeros((7,24))
# The number of hours for each day that have elapsed. This is
# the denominator of the average we will return.
self.day_counts = np.zeros((7,24))
# This is where we store incomplete hours. Once the hour is
# complete this is emptied out.
self.login_counts_tmp = np.zeros((7,24))
# The last timestamp recieved.
self.last_timestamp = None
def addLogin(self, timestamp):
# Update the temporary login counts.
self.login_counts_tmp[timestamp.weekday(), timestamp.hour] += 1.0
# If this is the first timestamp we will have no previous one.
if self.last_timestamp is None:
self.last_timestamp = timestamp
# Check if we are in a new day. This is how we update the day counts for
# the denominator of the forecast.
self.check_day(timestamp)
def check_day(self, timestamp):
# Is the new timestamp in a new day?
if not (timestamp.date() == self.last_timestamp.date() and timestamp.hour == self.last_timestamp.hour):
hour_difference = timestamp.hour - self.last_timestamp.hour
# If the new timestamp skipped hours we still need to count them
# to get a good average.
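# Illustrative example (made-up times): if the previous login was at 10:xx and the
# next arrives at 13:xx, hours 11 and 12 also get their day_counts incremented (with
# zero logins), so forecast() still divides by every elapsed hour.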
if hour_difference > 1.0 or hour_difference < -23.0:
for n in range(int(hour_difference) - 1):
missed_hour = self.last_timestamp + datetime.timedelta(**{'hours':n + 1})
self.day_counts[missed_hour.weekday(), missed_hour.hour] += 1.0
self.day_counts[self.last_timestamp.weekday(), self.last_timestamp.hour] += 1.0
self.login_counts[self.last_timestamp.weekday(), self.last_timestamp.hour] += self.login_counts_tmp[self.last_timestamp.weekday(), self.last_timestamp.hour]
self.login_counts_tmp[self.last_timestamp.weekday(), self.last_timestamp.hour] = 0.0
else:
self.day_counts[self.last_timestamp.weekday(), self.last_timestamp.hour] += 1.0
self.login_counts[self.last_timestamp.weekday(), self.last_timestamp.hour] += self.login_counts_tmp[self.last_timestamp.weekday(), self.last_timestamp.hour]
self.login_counts_tmp[self.last_timestamp.weekday(), self.last_timestamp.hour] = 0.0
self.last_timestamp = timestamp
def forecast(self, day, hour):
# Return the forecast or '-' if we haven't collected enough data.
if self.day_counts[day, hour] != 0:
return self.login_counts[day, hour] / self.day_counts[day, hour]
else:
return '-'
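# Hedged usage sketch (timestamps are invented, not from this module):
#
# dp = DemandPredictor()
# for ts in login_timestamps: # datetime.datetime objects parsed from a log
# dp.addLogin(ts)
# print dp.forecast(day=4, hour=23) # mean Friday 23:00 logins, or '-' without data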
| mit |
h3biomed/ansible | test/units/modules/network/f5/test_bigip_ssl_certificate.py | 16 | 4735 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_ssl_certificate import ArgumentSpec
from library.modules.bigip_ssl_certificate import ApiParameters
from library.modules.bigip_ssl_certificate import ModuleParameters
from library.modules.bigip_ssl_certificate import ModuleManager
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_ssl_certificate import ArgumentSpec
from ansible.modules.network.f5.bigip_ssl_certificate import ApiParameters
from ansible.modules.network.f5.bigip_ssl_certificate import ModuleParameters
from ansible.modules.network.f5.bigip_ssl_certificate import ModuleManager
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters_cert(self):
cert_content = load_fixture('create_insecure_cert1.crt')
args = dict(
content=cert_content,
name="cert1",
partition="Common",
state="present",
)
p = ModuleParameters(params=args)
assert p.name == 'cert1'
assert p.filename == 'cert1.crt'
assert 'Signature Algorithm' in p.content
assert '-----BEGIN CERTIFICATE-----' in p.content
assert '-----END CERTIFICATE-----' in p.content
assert p.checksum == '1e55aa57ee166a380e756b5aa4a835c5849490fe'
assert p.state == 'present'
def test_module_issuer_cert_key(self):
args = dict(
issuer_cert='foo',
partition="Common",
)
p = ModuleParameters(params=args)
assert p.issuer_cert == '/Common/foo.crt'
def test_api_issuer_cert_key(self):
args = load_fixture('load_sys_file_ssl_cert_with_issuer_cert.json')
p = ApiParameters(params=args)
assert p.issuer_cert == '/Common/intermediate.crt'
class TestCertificateManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_import_certificate_and_key_no_key_passphrase(self, *args):
set_module_args(dict(
name='foo',
content=load_fixture('cert1.crt'),
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_import_certificate_chain(self, *args):
set_module_args(dict(
name='foo',
content=load_fixture('chain1.crt'),
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 |
CiscoSystems/openstack-dashboard | django-openstack/django_openstack/syspanel/urls.py | 7 | 2972 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import *
from django.conf import settings
INSTANCES = r'^instances/(?P<instance_id>[^/]+)/%s$'
IMAGES = r'^images/(?P<image_id>[^/]+)/%s$'
USERS = r'^users/(?P<user_id>[^/]+)/%s$'
TENANTS = r'^tenants/(?P<tenant_id>[^/]+)/%s$'
urlpatterns = patterns('django_openstack.syspanel.views.instances',
url(r'^usage/(?P<tenant_id>[^/]+)$', 'tenant_usage',
name='syspanel_tenant_usage'),
url(r'^instances/$', 'index', name='syspanel_instances'),
url(r'^instances/refresh$', 'refresh', name='syspanel_instances_refresh'),
# NOTE(termie): currently just using the 'dash' versions
#url(INSTANCES % 'console', 'console', name='syspanel_instances_console'),
#url(INSTANCES % 'vnc', 'vnc', name='syspanel_instances_vnc'),
)
urlpatterns += patterns('django_openstack.syspanel.views.images',
url(r'^images/$', 'index', name='syspanel_images'),
url(IMAGES % 'update', 'update', name='syspanel_images_update'),
#url(INSTANCES % 'vnc', 'vnc', name='syspanel_instances_vnc'),
)
urlpatterns += patterns('django_openstack.syspanel.views.quotas',
url(r'^quotas/$', 'index', name='syspanel_quotas'),
)
urlpatterns += patterns('django_openstack.syspanel.views.flavors',
url(r'^flavors/$', 'index', name='syspanel_flavors'),
url(r'^flavors/create/$', 'create', name='syspanel_flavors_create'),
)
urlpatterns += patterns('django_openstack.syspanel.views.users',
url(r'^users/$', 'index', name='syspanel_users'),
url(USERS % 'update', 'update', name='syspanel_users_update'),
url(r'^users/create$', 'create', name='syspanel_users_create'),
)
urlpatterns += patterns('django_openstack.syspanel.views.services',
url(r'^services/$', 'index', name='syspanel_services'),
)
urlpatterns += patterns('django_openstack.syspanel.views.tenants',
url(r'^tenants/$', 'index', name='syspanel_tenants'),
url(r'^tenants/create$', 'create', name='syspanel_tenants_create'),
url(TENANTS % 'update', 'update', name='syspanel_tenant_update'),
url(TENANTS % 'users', 'users', name='syspanel_tenant_users'),
url(TENANTS % 'quotas', 'quotas', name='syspanel_tenant_quotas'),
)
| apache-2.0 |
DMOJ/site | judge/bridge/django_handler.py | 1 | 1990 | import json
import logging
import struct
from judge.bridge.base_handler import Disconnect, ZlibPacketHandler
logger = logging.getLogger('judge.bridge')
size_pack = struct.Struct('!I')
class DjangoHandler(ZlibPacketHandler):
def __init__(self, request, client_address, server, judges):
super().__init__(request, client_address, server)
self.handlers = {
'submission-request': self.on_submission,
'terminate-submission': self.on_termination,
'disconnect-judge': self.on_disconnect_request,
}
self.judges = judges
def send(self, data):
super().send(json.dumps(data, separators=(',', ':')))
def on_packet(self, packet):
packet = json.loads(packet)
try:
result = self.handlers.get(packet.get('name', None), self.on_malformed)(packet)
except Exception:
logger.exception('Error in packet handling (Django-facing)')
result = {'name': 'bad-request'}
self.send(result)
raise Disconnect()
def on_submission(self, data):
id = data['submission-id']
problem = data['problem-id']
language = data['language']
source = data['source']
judge_id = data['judge-id']
priority = data['priority']
if not self.judges.check_priority(priority):
return {'name': 'bad-request'}
self.judges.judge(id, problem, language, source, judge_id, priority)
return {'name': 'submission-received', 'submission-id': id}
def on_termination(self, data):
return {'name': 'submission-received', 'judge-aborted': self.judges.abort(data['submission-id'])}
def on_disconnect_request(self, data):
judge_id = data['judge-id']
force = data['force']
self.judges.disconnect(judge_id, force=force)
def on_malformed(self, packet):
logger.error('Malformed packet: %s', packet)
def on_close(self):
self._to_kill = False
| agpl-3.0 |
Bjwebb/detecting-clouds | totalimage.py | 1 | 2943 | from django.core.management import setup_environ
import clouds.settings
setup_environ(clouds.settings)
from django.db.models import Count, Sum, Avg, F
from django.db.models.query import QuerySet
from clouds.models import RealPoint
import PIL.Image, PIL.ImageDraw
import pickle, os
from django.db import connection
minimum_points = 200
generation=1
if os.path.exists('totalimage.pickle'):
realpoints = pickle.load(open('totalimage.pickle', 'r'))
else:
kwargs = dict(line__linevalues__generation__pk=2, line__linevalues__realpoint_count__gt=minimum_points)
"""
realpoints = RealPoint.objects.filter(
#realpoints = RealPoint.objects.filter(x__gt=0,x__lt=40,y__gt=0,y__lt=90,
#realpoints = RealPoint.objects.filter(x__gt=190,x__lt=200,y__gt=190,y__lt=200,
generation=generation, sidpoint__isnull=False, active=True,
**kwargs).extra(select={
'ix':'floor(clouds_realpoint.x)',
'iy':'floor(clouds_realpoint.y)',
# 'deviation': 'AVG(clouds_realpoint.flux/clouds_linevalues.median_flux)',
}).values('ix', 'iy').annotate(
sum=Sum('flux'),
count=Count('pk'),
avg=Avg('flux'),
).order_by()
"""
cursor = connection.cursor()
cursor.execute("""SELECT (floor(clouds_realpoint.y)) AS "iy", (floor(clouds_realpoint.x)) AS "ix", COUNT("clouds_realpoint"."id") AS "count", SUM("clouds_realpoint"."flux") AS "sum", AVG("clouds_realpoint"."flux") AS "avg", AVG(clouds_realpoint.flux/clouds_linevalues.median_flux) AS "deviation"
FROM "clouds_realpoint"
INNER JOIN "clouds_line" ON ("clouds_realpoint"."line_id" = "clouds_line"."id")
INNER JOIN "clouds_linevalues" ON ("clouds_line"."id" = "clouds_linevalues"."line_id")
INNER JOIN "clouds_sidpoint" ON ("clouds_realpoint"."sidpoint_id" = "clouds_sidpoint"."id")
INNER JOIN "clouds_image" ON ("clouds_realpoint"."image_id" = "clouds_image"."id")
WHERE ("clouds_realpoint"."active" = True
AND "clouds_linevalues"."generation_id" = 3
AND "clouds_realpoint"."generation_id" = 1
AND "clouds_linevalues"."realpoint_count" > 200
AND "clouds_sidpoint"."id" IS NOT NULL
AND "clouds_image"."moon" = False)
GROUP BY (floor(clouds_realpoint.y)), (floor(clouds_realpoint.x))""")
realpoints = cursor.fetchall()
pickle.dump(list(realpoints), open('totalimage.pickle', 'w'))
#print list(realpoints.values('line').distinct())
#print realpoints
print len(realpoints)
#import sys
#sys.exit()
i = {'iy':0, 'ix':1, 'count':2, 'sum':3, 'avg':4, 'deviation':5}
for agg, scale in [('count',1), ('sum',5000.0), ('avg',50.0), ('deviation', 0.01) ]:
im = PIL.Image.new('RGB', (640,480), (0,0,0))
draw = PIL.ImageDraw.Draw(im)
for realpoint in realpoints:
col = int(realpoint[i[agg]] / scale)
if col > 255: col = 255
draw.point((realpoint[i['ix']],realpoint[i['iy']]), fill=(col,col,col))
im.save('totalimage_{0}.png'.format(agg))
| mit |
TheNeikos/rust | src/etc/tidy.py | 31 | 6552 | # Copyright 2010-2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import sys
import fileinput
import subprocess
import re
import os
from licenseck import check_license
import snapshot
err = 0
cols = 100
cr_flag = "ignore-tidy-cr"
tab_flag = "ignore-tidy-tab"
linelength_flag = "ignore-tidy-linelength"
interesting_files = ['.rs', '.py', '.js', '.sh', '.c', '.h']
uninteresting_files = ['miniz.c', 'jquery', 'rust_android_dummy']
def report_error_name_no(name, no, s):
global err
print("%s:%d: %s" % (name, no, s))
err = 1
def report_err(s):
report_error_name_no(fileinput.filename(), fileinput.filelineno(), s)
def report_warn(s):
print("%s:%d: %s" % (fileinput.filename(),
fileinput.filelineno(),
s))
def do_license_check(name, contents):
if not check_license(name, contents):
report_error_name_no(name, 1, "incorrect license")
def update_counts(current_name):
global file_counts
global count_other_linted_files
_, ext = os.path.splitext(current_name)
if ext in interesting_files:
file_counts[ext] += 1
else:
count_other_linted_files += 1
def interesting_file(f):
if any(x in f for x in uninteresting_files):
return False
return any(os.path.splitext(f)[1] == ext for ext in interesting_files)
# Be careful to support Python 2.4, 2.6, and 3.x here!
config_proc = subprocess.Popen(["git", "config", "core.autocrlf"],
stdout=subprocess.PIPE)
result = config_proc.communicate()[0]
true = "true".encode('utf8')
autocrlf = result.strip() == true if result is not None else False
current_name = ""
current_contents = ""
check_tab = True
check_cr = True
check_linelength = True
if len(sys.argv) < 2:
print("usage: tidy.py <src-dir>")
sys.exit(1)
src_dir = sys.argv[1]
count_lines = 0
count_non_blank_lines = 0
count_other_linted_files = 0
file_counts = {ext: 0 for ext in interesting_files}
all_paths = set()
try:
for (dirpath, dirnames, filenames) in os.walk(src_dir):
# Skip some third-party directories
skippable_dirs = {
'src/jemalloc',
'src/llvm',
'src/gyp',
'src/libbacktrace',
'src/libuv',
'src/compiler-rt',
'src/rt/hoedown',
'src/rustllvm',
'src/rt/valgrind',
'src/rt/msvc',
'src/rust-installer'
}
if any(d in dirpath for d in skippable_dirs):
continue
file_names = [os.path.join(dirpath, f) for f in filenames
if interesting_file(f)
and not f.endswith("_gen.rs")
and not ".#" is f]
if not file_names:
continue
for line in fileinput.input(file_names,
openhook=fileinput.hook_encoded("utf-8")):
filename = fileinput.filename()
if "tidy.py" not in filename:
if "TODO" in line:
report_err("TODO is deprecated; use FIXME")
match = re.match(r'^.*/(\*|/!?)\s*XXX', line)
if match:
report_err("XXX is no longer necessary, use FIXME")
match = re.match(r'^.*//\s*(NOTE.*)$', line)
if match and "TRAVIS" not in os.environ:
m = match.group(1)
if "snap" in m.lower():
report_warn(match.group(1))
match = re.match(r'^.*//\s*SNAP\s+(\w+)', line)
if match:
hsh = match.group(1)
date, rev = snapshot.curr_snapshot_rev()
if not hsh.startswith(rev):
report_err("snapshot out of date (" + date
+ "): " + line)
else:
if "SNAP" in line:
report_warn("unmatched SNAP line: " + line)
if cr_flag in line:
check_cr = False
if tab_flag in line:
check_tab = False
if linelength_flag in line:
check_linelength = False
if check_tab and ('\t' in line and
"Makefile" not in filename):
report_err("tab character")
if check_cr and not autocrlf and '\r' in line:
report_err("CR character")
if line.endswith(" \n") or line.endswith("\t\n"):
report_err("trailing whitespace")
line_len = len(line)-2 if autocrlf else len(line)-1
if check_linelength and line_len > cols:
report_err("line longer than %d chars" % cols)
if fileinput.isfirstline():
# This happens at the end of each file except the last.
if current_name != "":
update_counts(current_name)
assert len(current_contents) > 0
do_license_check(current_name, current_contents)
current_name = filename
current_contents = ""
check_cr = True
check_tab = True
check_linelength = True
# Put a reasonable limit on the amount of header data we use for
# the licenseck
if len(current_contents) < 1000:
current_contents += line
count_lines += 1
if line.strip():
count_non_blank_lines += 1
if current_name != "":
update_counts(current_name)
assert len(current_contents) > 0
do_license_check(current_name, current_contents)
except UnicodeDecodeError as e:
report_err("UTF-8 decoding error " + str(e))
print
for ext in sorted(file_counts, key=file_counts.get, reverse=True):
print("* linted {} {} files".format(file_counts[ext], ext))
print("* linted {} other files".format(count_other_linted_files))
print("* total lines of code: {}".format(count_lines))
print("* total non-blank lines of code: {}".format(count_non_blank_lines))
print()
sys.exit(err)
| apache-2.0 |
danakj/chromium | tools/android/loading/report_unittest.py | 4 | 13616 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import metrics
import report
from queuing_lens import QueuingLens
import test_utils
import user_satisfied_lens_unittest
class LoadingReportTestCase(unittest.TestCase):
MILLI_TO_MICRO = 1000
_NAVIGATION_START_TIME = 12
_FIRST_REQUEST_TIME = 15
_CONTENTFUL_PAINT = 120
_TEXT_PAINT = 30
_SIGNIFICANT_PAINT = 50
_DURATION = 400
_REQUEST_OFFSET = 5
_LOAD_END_TIME = 1280
_MAIN_FRAME_ID = 1
_FIRST_REQUEST_DATA_LENGTH = 128
_SECOND_REQUEST_DATA_LENGTH = 1024
_TOPLEVEL_EVENT_OFFSET = 10
_TOPLEVEL_EVENT_DURATION = 100
_SCRIPT_EVENT_DURATION = 50
_PARSING_EVENT_DURATION = 60
def setUp(self):
self.trace_creator = test_utils.TraceCreator()
self.requests = [
self.trace_creator.RequestAt(self._FIRST_REQUEST_TIME, frame_id=1),
self.trace_creator.RequestAt(
self._NAVIGATION_START_TIME + self._REQUEST_OFFSET, self._DURATION)]
self.requests[0].timing.receive_headers_end = 0
self.requests[1].timing.receive_headers_end = 0
self.requests[0].encoded_data_length = self._FIRST_REQUEST_DATA_LENGTH
self.requests[1].encoded_data_length = self._SECOND_REQUEST_DATA_LENGTH
self.ad_domain = 'i-ve-got-the-best-ads.com'
self.ad_url = 'http://www.' + self.ad_domain + '/i-m-really-rich.js'
self.requests[0].url = self.ad_url
self.trace_events = [
{'args': {'name': 'CrRendererMain'}, 'cat': '__metadata',
'name': 'thread_name', 'ph': 'M', 'pid': 1, 'tid': 1, 'ts': 0},
{'ts': self._NAVIGATION_START_TIME * self.MILLI_TO_MICRO, 'ph': 'R',
'cat': 'blink.user_timing', 'pid': 1, 'tid': 1,
'name': 'navigationStart',
'args': {'frame': 1}},
{'ts': self._LOAD_END_TIME * self.MILLI_TO_MICRO, 'ph': 'I',
'cat': 'devtools.timeline', 'pid': 1, 'tid': 1,
'name': 'MarkLoad',
'args': {'data': {'isMainFrame': True}}},
{'ts': self._CONTENTFUL_PAINT * self.MILLI_TO_MICRO, 'ph': 'I',
'cat': 'blink.user_timing', 'pid': 1, 'tid': 1,
'name': 'firstContentfulPaint',
'args': {'frame': self._MAIN_FRAME_ID}},
{'ts': self._TEXT_PAINT * self.MILLI_TO_MICRO, 'ph': 'I',
'cat': 'blink.user_timing', 'pid': 1, 'tid': 1,
'name': 'firstPaint',
'args': {'frame': self._MAIN_FRAME_ID}},
{'ts': 90 * self.MILLI_TO_MICRO, 'ph': 'I',
'cat': 'blink', 'pid': 1, 'tid': 1,
'name': 'FrameView::synchronizedPaint'},
{'ts': self._SIGNIFICANT_PAINT * self.MILLI_TO_MICRO, 'ph': 'I',
'cat': 'foobar', 'name': 'biz', 'pid': 1, 'tid': 1,
'args': {'counters': {
'LayoutObjectsThatHadNeverHadLayout': 10}}},
{'ts': (self._NAVIGATION_START_TIME - self._TOPLEVEL_EVENT_OFFSET)
* self.MILLI_TO_MICRO,
'pid': 1, 'tid': 1, 'ph': 'X',
'dur': self._TOPLEVEL_EVENT_DURATION * self.MILLI_TO_MICRO,
'cat': 'toplevel', 'name': 'MessageLoop::RunTask'},
{'ts': self._NAVIGATION_START_TIME * self.MILLI_TO_MICRO,
'pid': 1, 'tid': 1, 'ph': 'X',
'dur': self._PARSING_EVENT_DURATION * self.MILLI_TO_MICRO,
'cat': 'devtools.timeline', 'name': 'ParseHTML',
'args': {'beginData': {'url': ''}}},
{'ts': self._NAVIGATION_START_TIME * self.MILLI_TO_MICRO,
'pid': 1, 'tid': 1, 'ph': 'X',
'dur': self._SCRIPT_EVENT_DURATION * self.MILLI_TO_MICRO,
'cat': 'devtools.timeline', 'name': 'EvaluateScript',
'args': {'data': {'scriptName': ''}}}]
def _MakeTrace(self):
trace = self.trace_creator.CreateTrace(
self.requests, self.trace_events, self._MAIN_FRAME_ID)
return trace
def _AddQueuingEvents(self, source_id, url, start_msec, ready_msec, end_msec):
self.trace_events.extend([
{'args': {
'data': {
'request_url': url,
'source_id': source_id
}
},
'cat': QueuingLens.QUEUING_CATEGORY,
'id': source_id,
'pid': 1, 'tid': 10,
'name': QueuingLens.ASYNC_NAME,
'ph': 'b',
'ts': start_msec * self.MILLI_TO_MICRO
},
{'args': {
'data': {
'source_id': source_id
}
},
'cat': QueuingLens.QUEUING_CATEGORY,
'id': source_id,
'pid': 1, 'tid': 10,
'name': QueuingLens.READY_NAME,
'ph': 'n',
'ts': ready_msec * self.MILLI_TO_MICRO
},
{'args': {
'data': {
'source_id': source_id
}
},
'cat': QueuingLens.QUEUING_CATEGORY,
'id': source_id,
'pid': 1, 'tid': 10,
'name': QueuingLens.ASYNC_NAME,
'ph': 'e',
'ts': end_msec * self.MILLI_TO_MICRO
}])
def testGenerateReport(self):
trace = self._MakeTrace()
loading_report = report.LoadingReport(trace).GenerateReport()
self.assertEqual(trace.url, loading_report['url'])
self.assertEqual(self._TEXT_PAINT - self._NAVIGATION_START_TIME,
loading_report['first_text_ms'])
self.assertEqual(self._SIGNIFICANT_PAINT - self._NAVIGATION_START_TIME,
loading_report['significant_ms'])
self.assertEqual(self._CONTENTFUL_PAINT - self._NAVIGATION_START_TIME,
loading_report['contentful_ms'])
self.assertAlmostEqual(self._LOAD_END_TIME - self._NAVIGATION_START_TIME,
loading_report['plt_ms'])
self.assertEqual(2, loading_report['total_requests'])
self.assertAlmostEqual(0.34, loading_report['contentful_byte_frac'], 2)
self.assertAlmostEqual(0.1844, loading_report['significant_byte_frac'], 2)
self.assertEqual(2, loading_report['plt_requests'])
self.assertEqual(1, loading_report['first_text_requests'])
self.assertEqual(1, loading_report['contentful_requests'])
self.assertEqual(1, loading_report['significant_requests'])
self.assertEqual(1, loading_report['plt_preloaded_requests'])
self.assertEqual(1, loading_report['first_text_preloaded_requests'])
self.assertEqual(1, loading_report['contentful_preloaded_requests'])
self.assertEqual(1, loading_report['significant_preloaded_requests'])
self.assertEqual(401, loading_report['plt_requests_cost'])
self.assertEqual(1, loading_report['first_text_requests_cost'])
self.assertEqual(1, loading_report['contentful_requests_cost'])
self.assertEqual(1, loading_report['significant_requests_cost'])
self.assertEqual(1, loading_report['plt_preloaded_requests_cost'])
self.assertEqual(1, loading_report['first_text_preloaded_requests_cost'])
self.assertEqual(1, loading_report['contentful_preloaded_requests_cost'])
self.assertEqual(1, loading_report['significant_preloaded_requests_cost'])
self.assertEqual(400, loading_report['plt_predicted_no_state_prefetch_ms'])
self.assertEqual(14,
loading_report['first_text_predicted_no_state_prefetch_ms'])
self.assertEqual(104,
loading_report['contentful_predicted_no_state_prefetch_ms'])
self.assertEqual(74,
loading_report['significant_predicted_no_state_prefetch_ms'])
self.assertEqual('', loading_report['contentful_inversion'])
self.assertEqual('', loading_report['significant_inversion'])
self.assertIsNone(loading_report['ad_requests'])
self.assertIsNone(loading_report['ad_or_tracking_requests'])
self.assertIsNone(loading_report['ad_or_tracking_initiated_requests'])
self.assertIsNone(loading_report['ad_or_tracking_initiated_transfer_size'])
self.assertIsNone(loading_report['ad_or_tracking_script_frac'])
self.assertIsNone(loading_report['ad_or_tracking_parsing_frac'])
self.assertEqual(
self._FIRST_REQUEST_DATA_LENGTH + self._SECOND_REQUEST_DATA_LENGTH
+ metrics.HTTP_OK_LENGTH * 2,
loading_report['transfer_size'])
self.assertEqual(0, loading_report['total_queuing_blocked_msec'])
self.assertEqual(0, loading_report['total_queuing_load_msec'])
self.assertEqual(0, loading_report['average_blocking_request_count'])
self.assertEqual(0, loading_report['median_blocking_request_count'])
def testInversion(self):
self.requests[0].timing.loading_finished = 4 * (
self._REQUEST_OFFSET + self._DURATION)
self.requests[1].initiator['type'] = 'parser'
self.requests[1].initiator['url'] = self.requests[0].url
for e in self.trace_events:
if e['name'] == 'firstContentfulPaint':
e['ts'] = self.MILLI_TO_MICRO * (
self._FIRST_REQUEST_TIME + self._REQUEST_OFFSET +
self._DURATION + 1)
break
loading_report = report.LoadingReport(self._MakeTrace()).GenerateReport()
self.assertEqual(self.requests[0].url,
loading_report['contentful_inversion'])
self.assertEqual('', loading_report['significant_inversion'])
def testPltNoLoadEvents(self):
trace = self._MakeTrace()
# Change the MarkLoad events.
for e in trace.tracing_track.GetEvents():
if e.name == 'MarkLoad':
e.tracing_event['name'] = 'dummy'
loading_report = report.LoadingReport(trace).GenerateReport()
self.assertAlmostEqual(self._REQUEST_OFFSET + self._DURATION,
loading_report['plt_ms'])
def testAdTrackingRules(self):
trace = self._MakeTrace()
loading_report = report.LoadingReport(
trace, [self.ad_domain], []).GenerateReport()
self.assertEqual(1, loading_report['ad_requests'])
self.assertEqual(1, loading_report['ad_or_tracking_requests'])
self.assertEqual(1, loading_report['ad_or_tracking_initiated_requests'])
self.assertIsNone(loading_report['tracking_requests'])
self.assertEqual(
self._FIRST_REQUEST_DATA_LENGTH + metrics.HTTP_OK_LENGTH,
loading_report['ad_or_tracking_initiated_transfer_size'])
def testThreadBusyness(self):
loading_report = report.LoadingReport(self._MakeTrace()).GenerateReport()
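    # Worked numbers for the assertions below: the single toplevel event runs
    # from 2 ms to 102 ms, i.e. it starts 10 ms before navigation start (12 ms)
    # and covers the following 90 ms. That fully covers the significant window
    # (12-50 ms), 90 ms of the 108 ms contentful window, and 90 ms of the
    # 1268 ms plt window.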
self.assertAlmostEqual(
1., loading_report['significant_activity_frac'])
self.assertAlmostEqual(
float(self._TOPLEVEL_EVENT_DURATION - self._TOPLEVEL_EVENT_OFFSET)
/ (self._CONTENTFUL_PAINT - self._NAVIGATION_START_TIME),
loading_report['contentful_activity_frac'])
self.assertAlmostEqual(
float(self._TOPLEVEL_EVENT_DURATION - self._TOPLEVEL_EVENT_OFFSET)
/ (self._LOAD_END_TIME - self._NAVIGATION_START_TIME),
loading_report['plt_activity_frac'])
def testActivityBreakdown(self):
loading_report = report.LoadingReport(self._MakeTrace()).GenerateReport()
load_time = float(self._LOAD_END_TIME - self._NAVIGATION_START_TIME)
contentful_time = float(
self._CONTENTFUL_PAINT - self._NAVIGATION_START_TIME)
self.assertAlmostEqual(self._SCRIPT_EVENT_DURATION / load_time,
loading_report['plt_script_frac'])
self.assertAlmostEqual(
(self._PARSING_EVENT_DURATION - self._SCRIPT_EVENT_DURATION)
/ load_time,
loading_report['plt_parsing_frac'])
self.assertAlmostEqual(1., loading_report['significant_script_frac'])
self.assertAlmostEqual(0., loading_report['significant_parsing_frac'])
self.assertAlmostEqual(self._SCRIPT_EVENT_DURATION / contentful_time,
loading_report['contentful_script_frac'])
self.assertAlmostEqual(
(self._PARSING_EVENT_DURATION - self._SCRIPT_EVENT_DURATION)
/ contentful_time, loading_report['contentful_parsing_frac'])
def testAdsAndTrackingCost(self):
load_time = float(self._LOAD_END_TIME - self._NAVIGATION_START_TIME)
self.trace_events.append(
{'ts': load_time / 3. * self.MILLI_TO_MICRO,
'pid': 1, 'tid': 1, 'ph': 'X',
'dur': load_time / 2. * self.MILLI_TO_MICRO,
'cat': 'devtools.timeline', 'name': 'EvaluateScript',
'args': {'data': {'scriptName': self.ad_url}}})
loading_report = report.LoadingReport(
self._MakeTrace(), [self.ad_domain]).GenerateReport()
self.assertAlmostEqual(.5, loading_report['ad_or_tracking_script_frac'], 2)
self.assertAlmostEqual(0., loading_report['ad_or_tracking_parsing_frac'])
def testQueueStats(self):
    # We use three requests, A, B and C. A is not blocked, B is blocked by A,
    # and C is blocked by A and B.
BASE_MSEC = self._FIRST_REQUEST_TIME + 4 * self._DURATION
self.requests = []
request_A = self.trace_creator.RequestAt(BASE_MSEC, 5)
request_B = self.trace_creator.RequestAt(BASE_MSEC + 6, 5)
request_C = self.trace_creator.RequestAt(BASE_MSEC + 12, 10)
self.requests.extend([request_A, request_B, request_C])
self._AddQueuingEvents(10, request_A.url,
BASE_MSEC, BASE_MSEC, BASE_MSEC + 5)
self._AddQueuingEvents(20, request_B.url,
BASE_MSEC + 1, BASE_MSEC + 6, BASE_MSEC + 11)
self._AddQueuingEvents(30, request_C.url,
BASE_MSEC + 2, BASE_MSEC + 12, BASE_MSEC + 22)
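    # Worked numbers for the assertions below, from the events added above:
    #   blocked msec = ready - start: A: 0, B: 5, C: 10 -> total 15
    #   load msec    = end - start:   A: 5, B: 10, C: 20 -> total 35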
loading_report = report.LoadingReport(self._MakeTrace()).GenerateReport()
self.assertEqual(15, loading_report['total_queuing_blocked_msec'])
self.assertEqual(35, loading_report['total_queuing_load_msec'])
self.assertAlmostEqual(1, loading_report['average_blocking_request_count'])
self.assertEqual(1, loading_report['median_blocking_request_count'])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
TGAC/KAT | deps/boost/tools/build/src/tools/gcc.py | 8 | 39530 | # Status: being ported by Steven Watanabe
# Base revision: 47077
# TODO: common.jam needs to be ported
# TODO: generators.jam needs to have register_c_compiler.
#
# Copyright 2001 David Abrahams.
# Copyright 2002-2006 Rene Rivera.
# Copyright 2002-2003 Vladimir Prus.
# Copyright (c) 2005 Reece H. Dunn.
# Copyright 2006 Ilya Sokolov.
# Copyright 2007 Roland Schwarz
# Copyright 2007 Boris Gubenko.
# Copyright 2008 Steven Watanabe
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import subprocess
import re
import bjam
from b2.tools import unix, common, rc, pch, builtin
from b2.build import feature, type, toolset, generators, property_set
from b2.build.property import Property
from b2.util.utility import os_name, on_windows
from b2.manager import get_manager
from b2.build.generators import Generator
from b2.build.toolset import flags
from b2.util.utility import to_seq
__debug = None
def debug():
global __debug
if __debug is None:
__debug = "--debug-configuration" in bjam.variable("ARGV")
return __debug
feature.extend('toolset', ['gcc'])
toolset.inherit_generators('gcc', [], 'unix', ['unix.link', 'unix.link.dll'])
toolset.inherit_flags('gcc', 'unix')
toolset.inherit_rules('gcc', 'unix')
generators.override('gcc.prebuilt', 'builtin.prebuilt')
generators.override('gcc.searched-lib-generator', 'searched-lib-generator')
# Target naming is determined by types/lib.jam and the settings below this
# comment.
#
# On *nix:
# libxxx.a static library
# libxxx.so shared library
#
# On windows (mingw):
# libxxx.lib static library
# xxx.dll DLL
# xxx.lib import library
#
# On windows (cygwin) i.e. <target-os>cygwin
# libxxx.a static library
# xxx.dll DLL
# libxxx.dll.a import library
#
# Note: the user can always override by using the <tag>@rule.
# These settings have been chosen so that mingw is in line with msvc naming
# conventions. For cygwin, the cygwin naming convention has been chosen.
# Make the "o" suffix used for the gcc toolset on all platforms.
type.set_generated_target_suffix('OBJ', ['<toolset>gcc'], 'o')
type.set_generated_target_suffix('STATIC_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'a')
type.set_generated_target_suffix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'dll.a')
type.set_generated_target_prefix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'lib')
__machine_match = re.compile('^([^ ]+)')
__version_match = re.compile('^([0-9.]+)')
def init(version = None, command = None, options = None):
"""
Initializes the gcc toolset for the given version. If necessary, command may
be used to specify where the compiler is located. The parameter 'options' is a
space-delimited list of options, each one specified as
<option-name>option-value. Valid option names are: cxxflags, linkflags and
linker-type. Accepted linker-type values are gnu, darwin, osf, hpux or sun
and the default value will be selected based on the current OS.
Example:
using gcc : 3.4 : : <cxxflags>foo <linkflags>bar <linker-type>sun ;
"""
options = to_seq(options)
command = to_seq(command)
# Information about the gcc command...
# The command.
command = to_seq(common.get_invocation_command('gcc', 'g++', command))
# The root directory of the tool install.
root = feature.get_values('<root>', options)
root = root[0] if root else ''
# The bin directory where to find the command to execute.
bin = None
# The flavor of compiler.
flavor = feature.get_values('<flavor>', options)
flavor = flavor[0] if flavor else ''
# Autodetect the root and bin dir if not given.
if command:
if not bin:
bin = common.get_absolute_tool_path(command[-1])
if not root:
root = os.path.dirname(bin)
# Autodetect the version and flavor if not given.
if command:
machine_info = subprocess.Popen(command + ['-dumpmachine'], stdout=subprocess.PIPE).communicate()[0]
machine = __machine_match.search(machine_info).group(1)
version_info = subprocess.Popen(command + ['-dumpversion'], stdout=subprocess.PIPE).communicate()[0]
version = __version_match.search(version_info).group(1)
if not flavor and machine.find('mingw') != -1:
flavor = 'mingw'
condition = None
if flavor:
condition = common.check_init_parameters('gcc', None,
('version', version),
('flavor', flavor))
else:
condition = common.check_init_parameters('gcc', None,
('version', version))
if command:
command = command[0]
common.handle_options('gcc', condition, command, options)
linker = feature.get_values('<linker-type>', options)
if not linker:
if os_name() == 'OSF':
linker = 'osf'
elif os_name() == 'HPUX':
            linker = 'hpux'
else:
linker = 'gnu'
init_link_flags('gcc', linker, condition)
# If gcc is installed in non-standard location, we'd need to add
# LD_LIBRARY_PATH when running programs created with it (for unit-test/run
# rules).
if command:
        # On multilib 64-bit boxes, there are both 32-bit and 64-bit libraries
        # and all must be added to LD_LIBRARY_PATH. The linker will pick the
        # right ones. Note that we don't provide a clean way to build a 32-bit
        # binary with a 64-bit compiler, but the user can always pass -m32
        # manually.
lib_path = [os.path.join(root, 'bin'),
os.path.join(root, 'lib'),
os.path.join(root, 'lib32'),
os.path.join(root, 'lib64')]
if debug():
print 'notice: using gcc libraries ::', condition, '::', lib_path
toolset.flags('gcc.link', 'RUN_PATH', condition, lib_path)
# If it's not a system gcc install we should adjust the various programs as
# needed to prefer using the install specific versions. This is essential
# for correct use of MinGW and for cross-compiling.
# - The archive builder.
archiver = common.get_invocation_command('gcc',
'ar', feature.get_values('<archiver>', options), [bin], path_last=True)
toolset.flags('gcc.archive', '.AR', condition, [archiver])
if debug():
print 'notice: using gcc archiver ::', condition, '::', archiver
# - Ranlib
ranlib = common.get_invocation_command('gcc',
'ranlib', feature.get_values('<ranlib>', options), [bin], path_last=True)
toolset.flags('gcc.archive', '.RANLIB', condition, [ranlib])
if debug():
print 'notice: using gcc archiver ::', condition, '::', ranlib
# - The resource compiler.
rc_command = common.get_invocation_command_nodefault('gcc',
'windres', feature.get_values('<rc>', options), [bin], path_last=True)
rc_type = feature.get_values('<rc-type>', options)
if not rc_type:
rc_type = 'windres'
if not rc_command:
        # If we can't find an RC compiler we fall back to a null RC compiler
        # that creates empty object files. This allows the same Jamfiles to
        # work across the board. The null RC uses the assembler to create the
        # empty objects, so configure that.
rc_command = common.get_invocation_command('gcc', 'as', [], [bin], path_last=True)
rc_type = 'null'
rc.configure([rc_command], condition, ['<rc-type>' + rc_type])
###if [ os.name ] = NT
###{
### # This causes single-line command invocation to not go through .bat files,
### # thus avoiding command-line length limitations.
### JAMSHELL = % ;
###}
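# Illustrative sketch only (not part of the original toolset): how init() might
# be called directly from Python, mirroring the jam-style example in its
# docstring. The version, command and option values below are made up.
def _example_init_gcc():
    init(version='4.8',
         command=['g++-4.8'],                    # hypothetical compiler command
         options=['<cxxflags>-std=c++11',        # extra compile flags
                  '<linkflags>-Wl,--as-needed',  # extra link flags
                  '<linker-type>gnu'])           # force the GNU linker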
#FIXME: when register_c_compiler is moved to
# generators, these should be updated
builtin.register_c_compiler('gcc.compile.c++.preprocess', ['CPP'], ['PREPROCESSED_CPP'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.c.preprocess', ['C'], ['PREPROCESSED_C'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.c++', ['CPP'], ['OBJ'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.c', ['C'], ['OBJ'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.asm', ['ASM'], ['OBJ'], ['<toolset>gcc'])
# pch support
# The compiler looks for a precompiled header in each directory just before it
# looks for the include file in that directory. The name searched for is the
# name specified in the #include directive with ".gch" suffix appended. The
# logic in gcc-pch-generator will make sure that BASE_PCH suffix is appended to
# full name of the header.
type.set_generated_target_suffix('PCH', ['<toolset>gcc'], 'gch')
# GCC-specific pch generator.
class GccPchGenerator(pch.PchGenerator):
# Inherit the __init__ method
def run_pch(self, project, name, prop_set, sources):
# Find the header in sources. Ignore any CPP sources.
header = None
for s in sources:
if type.is_derived(s.type(), 'H'):
header = s
# Error handling: Base header file name should be the same as the base
# precompiled header name.
header_name = header.name()
header_basename = os.path.basename(header_name).rsplit('.', 1)[0]
if header_basename != name:
location = project.project_module
###FIXME:
raise Exception()
### errors.user-error "in" $(location)": pch target name `"$(name)"' should be the same as the base name of header file `"$(header-name)"'" ;
pch_file = Generator.run(self, project, name, prop_set, [header])
# return result of base class and pch-file property as usage-requirements
# FIXME: what about multiple results from generator.run?
return (property_set.create([Property('pch-file', pch_file[0]),
Property('cflags', '-Winvalid-pch')]),
pch_file)
# Calls the base version specifying the source's name as the name of the
# created target. As a result, the PCH will be named whatever.hpp.gch, and not
# whatever.gch.
def generated_targets(self, sources, prop_set, project, name = None):
name = sources[0].name()
return Generator.generated_targets(self, sources,
prop_set, project, name)
# Note: the 'H' source type will catch both '.h' header and '.hpp' header. The
# latter have HPP type, but HPP type is derived from H. The type of compilation
# is determined entirely by the destination type.
generators.register(GccPchGenerator('gcc.compile.c.pch', False, ['H'], ['C_PCH'], ['<pch>on', '<toolset>gcc' ]))
generators.register(GccPchGenerator('gcc.compile.c++.pch', False, ['H'], ['CPP_PCH'], ['<pch>on', '<toolset>gcc' ]))
# Override default do-nothing generators.
generators.override('gcc.compile.c.pch', 'pch.default-c-pch-generator')
generators.override('gcc.compile.c++.pch', 'pch.default-cpp-pch-generator')
flags('gcc.compile', 'PCH_FILE', ['<pch>on'], ['<pch-file>'])
# Declare flags and action for compilation
flags('gcc.compile', 'OPTIONS', ['<optimization>off'], ['-O0'])
flags('gcc.compile', 'OPTIONS', ['<optimization>speed'], ['-O3'])
flags('gcc.compile', 'OPTIONS', ['<optimization>space'], ['-Os'])
flags('gcc.compile', 'OPTIONS', ['<inlining>off'], ['-fno-inline'])
flags('gcc.compile', 'OPTIONS', ['<inlining>on'], ['-Wno-inline'])
flags('gcc.compile', 'OPTIONS', ['<inlining>full'], ['-finline-functions', '-Wno-inline'])
flags('gcc.compile', 'OPTIONS', ['<warnings>off'], ['-w'])
flags('gcc.compile', 'OPTIONS', ['<warnings>on'], ['-Wall'])
flags('gcc.compile', 'OPTIONS', ['<warnings>all'], ['-Wall', '-pedantic'])
flags('gcc.compile', 'OPTIONS', ['<warnings-as-errors>on'], ['-Werror'])
flags('gcc.compile', 'OPTIONS', ['<debug-symbols>on'], ['-g'])
flags('gcc.compile', 'OPTIONS', ['<profiling>on'], ['-pg'])
flags('gcc.compile.c++', 'OPTIONS', ['<rtti>off'], ['-fno-rtti'])
flags('gcc.compile.c++', 'OPTIONS', ['<exception-handling>off'], ['-fno-exceptions'])
# On cygwin and mingw, gcc generates position independent code by default, and
# warns if -fPIC is specified. This might not be the right way of checking if
# we're using cygwin. For example, it's possible to run cygwin gcc from an NT
# shell, or to be cross-compiling. But we'll solve that problem when it's time.
# In that case we'll just add another parameter to 'init' and move this logic
# inside 'init'.
if not os_name () in ['CYGWIN', 'NT']:
# This logic will add -fPIC for all compilations:
#
# lib a : a.cpp b ;
# obj b : b.cpp ;
# exe c : c.cpp a d ;
# obj d : d.cpp ;
#
    # This all is fine, except that 'd' will be compiled with -fPIC even though
    # it's not needed, as 'd' is used only in an exe. However, it's hard to
    # detect where a target is going to be used. Alternatively, we could set
    # -fPIC only when the main target type is LIB, but then 'b' would be
    # compiled without -fPIC. On x86-64 that will lead to link errors. So,
    # compile everything with -fPIC.
#
# Yet another alternative would be to create propagated <sharedable>
# feature, and set it when building shared libraries, but that's hard to
# implement and will increase target path length even more.
flags('gcc.compile', 'OPTIONS', ['<link>shared'], ['-fPIC'])
if os_name() != 'NT' and os_name() != 'OSF' and os_name() != 'HPUX':
# OSF does have an option called -soname but it doesn't seem to work as
# expected, therefore it has been disabled.
HAVE_SONAME = ''
SONAME_OPTION = '-h'
flags('gcc.compile', 'USER_OPTIONS', [], ['<cflags>'])
flags('gcc.compile.c++', 'USER_OPTIONS',[], ['<cxxflags>'])
flags('gcc.compile', 'DEFINES', [], ['<define>'])
flags('gcc.compile', 'INCLUDES', [], ['<include>'])
engine = get_manager().engine()
engine.register_action('gcc.compile.c++.pch',
'"$(CONFIG_COMMAND)" -x c++-header $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"')
engine.register_action('gcc.compile.c.pch',
'"$(CONFIG_COMMAND)" -x c-header $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"')
def gcc_compile_cpp(targets, sources, properties):
# Some extensions are compiled as C++ by default. For others, we need to
# pass -x c++. We could always pass -x c++ but distcc does not work with it.
extension = os.path.splitext (sources [0]) [1]
lang = ''
if not extension in ['.cc', '.cp', '.cxx', '.cpp', '.c++', '.C']:
lang = '-x c++'
get_manager().engine().set_target_variable (targets, 'LANG', lang)
engine.add_dependency(targets, bjam.call('get-target-variable', targets, 'PCH_FILE'))
def gcc_compile_c(targets, sources, properties):
engine = get_manager().engine()
    # If we use the name g++ then the default file suffix -> language mapping
    # does not work. So we have to pass the -x option. Maybe we can work around
    # this by allowing the user to specify both C and C++ compiler names.
#if $(>:S) != .c
#{
engine.set_target_variable (targets, 'LANG', '-x c')
#}
engine.add_dependency(targets, bjam.call('get-target-variable', targets, 'PCH_FILE'))
engine.register_action(
'gcc.compile.c++',
'"$(CONFIG_COMMAND)" $(LANG) -ftemplate-depth-128 $(OPTIONS) ' +
'$(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" ' +
'-c -o "$(<:W)" "$(>:W)"',
function=gcc_compile_cpp,
bound_list=['PCH_FILE'])
engine.register_action(
'gcc.compile.c',
'"$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) ' +
'-I"$(PCH_FILE:D)" -I"$(INCLUDES)" -c -o "$(<)" "$(>)"',
function=gcc_compile_c,
bound_list=['PCH_FILE'])
engine.register_action(
'gcc.compile.c++.preprocess',
function=gcc_compile_cpp,
bound_list=['PCH_FILE'],
command="""
$(CONFIG_COMMAND) $(LANG) -ftemplate-depth-$(TEMPLATE_DEPTH) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" "$(>:W)" -E >"$(<:W)"
"""
)
engine.register_action(
'gcc.compile.c.preprocess',
function=gcc_compile_c,
bound_list=['PCH_FILE'],
command="""
$(CONFIG_COMMAND) $(LANG) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" "$(>)" -E >$(<)
"""
)
def gcc_compile_asm(targets, sources, properties):
get_manager().engine().set_target_variable(targets, 'LANG', '-x assembler-with-cpp')
engine.register_action(
'gcc.compile.asm',
'"$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"',
function=gcc_compile_asm)
class GccLinkingGenerator(unix.UnixLinkingGenerator):
"""
    The class which checks that we don't try to use the <runtime-link>static
    property while creating or using a shared library, since that is not
    supported by gcc/libc.
"""
def run(self, project, name, ps, sources):
# TODO: Replace this with the use of a target-os property.
no_static_link = False
if bjam.variable('UNIX'):
no_static_link = True;
##FIXME: what does this mean?
## {
## switch [ modules.peek : JAMUNAME ]
## {
## case * : no-static-link = true ;
## }
## }
reason = None
if no_static_link and ps.get('runtime-link') == 'static':
if ps.get('link') == 'shared':
reason = "On gcc, DLL can't be build with '<runtime-link>static'."
elif type.is_derived(self.target_types[0], 'EXE'):
for s in sources:
source_type = s.type()
if source_type and type.is_derived(source_type, 'SHARED_LIB'):
reason = "On gcc, using DLLS together with the " +\
"<runtime-link>static options is not possible "
if reason:
print 'warning:', reason
print 'warning:',\
"It is suggested to use '<runtime-link>static' together",\
"with '<link>static'." ;
return
else:
generated_targets = unix.UnixLinkingGenerator.run(self, project,
name, ps, sources)
return generated_targets
if on_windows():
flags('gcc.link.dll', '.IMPLIB-COMMAND', [], ['-Wl,--out-implib,'])
generators.register(
GccLinkingGenerator('gcc.link', True,
['OBJ', 'SEARCHED_LIB', 'STATIC_LIB', 'IMPORT_LIB'],
[ 'EXE' ],
[ '<toolset>gcc' ]))
generators.register(
GccLinkingGenerator('gcc.link.dll', True,
['OBJ', 'SEARCHED_LIB', 'STATIC_LIB', 'IMPORT_LIB'],
['IMPORT_LIB', 'SHARED_LIB'],
['<toolset>gcc']))
else:
generators.register(
GccLinkingGenerator('gcc.link', True,
['LIB', 'OBJ'],
['EXE'],
['<toolset>gcc']))
generators.register(
GccLinkingGenerator('gcc.link.dll', True,
['LIB', 'OBJ'],
['SHARED_LIB'],
['<toolset>gcc']))
# Declare flags for linking.
# First, the common flags.
flags('gcc.link', 'OPTIONS', ['<debug-symbols>on'], ['-g'])
flags('gcc.link', 'OPTIONS', ['<profiling>on'], ['-pg'])
flags('gcc.link', 'USER_OPTIONS', [], ['<linkflags>'])
flags('gcc.link', 'LINKPATH', [], ['<library-path>'])
flags('gcc.link', 'FINDLIBS-ST', [], ['<find-static-library>'])
flags('gcc.link', 'FINDLIBS-SA', [], ['<find-shared-library>'])
flags('gcc.link', 'LIBRARIES', [], ['<library-file>'])
# For <runtime-link>static we made sure there are no dynamic libraries in the
# link. On HP-UX not all system libraries exist as archived libraries (for
# example, there is no libunwind.a), so, on this platform, the -static option
# cannot be specified.
if os_name() != 'HPUX':
flags('gcc.link', 'OPTIONS', ['<runtime-link>static'], ['-static'])
# Now, the vendor specific flags.
# The parameter linker can be either gnu, darwin, osf, hpux or sun.
def init_link_flags(toolset, linker, condition):
"""
Now, the vendor specific flags.
The parameter linker can be either gnu, darwin, osf, hpux or sun.
"""
toolset_link = toolset + '.link'
if linker == 'gnu':
# Strip the binary when no debugging is needed. We use --strip-all flag
# as opposed to -s since icc (intel's compiler) is generally
# option-compatible with and inherits from the gcc toolset, but does not
# support -s.
# FIXME: what does unchecked translate to?
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,--strip-all']) # : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
flags(toolset_link, 'START-GROUP', condition, ['-Wl,--start-group'])# : unchecked ;
flags(toolset_link, 'END-GROUP', condition, ['-Wl,--end-group']) # : unchecked ;
        # gnu ld has the ability to change the search behaviour for libraries
        # referenced by the -l switch. These modifiers are -Bstatic and
        # -Bdynamic and change the search for -l switches that follow them.
        # The following list shows the tried variants.
# The search stops at the first variant that has a match.
# *nix: -Bstatic -lxxx
# libxxx.a
#
# *nix: -Bdynamic -lxxx
# libxxx.so
# libxxx.a
#
# windows (mingw,cygwin) -Bstatic -lxxx
# libxxx.a
# xxx.lib
#
# windows (mingw,cygwin) -Bdynamic -lxxx
# libxxx.dll.a
# xxx.dll.a
# libxxx.a
# xxx.lib
# cygxxx.dll (*)
# libxxx.dll
# xxx.dll
# libxxx.a
#
# (*) This is for cygwin
# Please note that -Bstatic and -Bdynamic are not a guarantee that a
# static or dynamic lib indeed gets linked in. The switches only change
# search patterns!
# On *nix mixing shared libs with static runtime is not a good idea.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>shared', condition),
['-Wl,-Bdynamic']) # : unchecked ;
# On windows allow mixing of static and dynamic libs with static
# runtime.
flags(toolset_link, 'FINDLIBS-ST-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
flags(toolset_link, 'FINDLIBS-SA-PFX',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bdynamic']) # : unchecked ;
flags(toolset_link, 'OPTIONS',
map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition),
['-Wl,-Bstatic']) # : unchecked ;
elif linker == 'darwin':
        # On Darwin, the -s option to ld does not work unless we pass -static,
        # and passing -static unconditionally is a bad idea. So, don't pass -s
        # at all; darwin.jam will use a separate 'strip' invocation.
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ;
elif linker == 'osf':
# No --strip-all, just -s.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
        # This linker does not support -R.
flags(toolset_link, 'RPATH_OPTION', condition, ['-rpath']) # : unchecked ;
# -rpath-link is not supported at all.
elif linker == 'sun':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s'])
# : unchecked ;
flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ;
        # The Solaris linker does not have a separate -rpath-link, but allows
        # -L to be used for the same purpose.
flags(toolset_link, 'LINKPATH', condition, ['<xdll-path>']) # : unchecked ;
# This permits shared libraries with non-PIC code on Solaris.
        # VP, 2004/09/07: Now that we have -fPIC hardcoded in link.dll, the
        # following is not needed. Whether -fPIC should be hardcoded is a
        # separate question.
# AH, 2004/10/16: it is still necessary because some tests link against
# static libraries that were compiled without PIC.
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-mimpure-text'])
# : unchecked ;
elif linker == 'hpux':
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition),
['-Wl,-s']) # : unchecked ;
flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition),
['-fPIC']) # : unchecked ;
else:
# FIXME:
errors.user_error(
"$(toolset) initialization: invalid linker '$(linker)' " +
"The value '$(linker)' specified for <linker> is not recognized. " +
"Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'")
# Declare actions for linking.
def gcc_link(targets, sources, properties):
engine = get_manager().engine()
engine.set_target_variable(targets, 'SPACE', ' ')
# Serialize execution of the 'link' action, since running N links in
# parallel is just slower. For now, serialize only gcc links, it might be a
# good idea to serialize all links.
engine.set_target_variable(targets, 'JAM_SEMAPHORE', '<s>gcc-link-semaphore')
engine.register_action(
'gcc.link',
'"$(CONFIG_COMMAND)" -L"$(LINKPATH)" ' +
'-Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" ' +
'-Wl,-rpath-link$(SPACE)-Wl,"$(RPATH_LINK)" -o "$(<)" ' +
'$(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) ' +
'-l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) ' +
'$(OPTIONS) $(USER_OPTIONS)',
function=gcc_link,
bound_list=['LIBRARIES'])
# Default value. Mostly for the sake of intel-linux that inherits from gcc, but
# does not have the same logic to set the .AR variable. We can put the same
# logic in intel-linux, but that's hardly worth the trouble as on Linux, 'ar' is
# always available.
__AR = 'ar'
flags('gcc.archive', 'AROPTIONS', [], ['<archiveflags>'])
def gcc_archive(targets, sources, properties):
# Always remove archive and start again. Here's rationale from
#
# Andre Hentz:
#
# I had a file, say a1.c, that was included into liba.a. I moved a1.c to
# a2.c, updated my Jamfiles and rebuilt. My program was crashing with absurd
# errors. After some debugging I traced it back to the fact that a1.o was
# *still* in liba.a
#
# Rene Rivera:
#
# Originally removing the archive was done by splicing an RM onto the
# archive action. That makes archives fail to build on NT when they have
# many files because it will no longer execute the action directly and blow
# the line length limit. Instead we remove the file in a different action,
# just before building the archive.
clean = targets[0] + '(clean)'
bjam.call('TEMPORARY', clean)
bjam.call('NOCARE', clean)
engine = get_manager().engine()
engine.set_target_variable('LOCATE', clean, bjam.call('get-target-variable', targets, 'LOCATE'))
engine.add_dependency(clean, sources)
engine.add_dependency(targets, clean)
engine.set_update_action('common.RmTemps', clean, targets)
# Declare action for creating static libraries.
# The letter 'r' means to add files to the archive with replacement. Since we
# remove the archive, we don't care about replacement, but there's no option
# "add without replacement".
# The letter 'c' suppresses the warning in case the archive does not exist yet.
# That warning is produced only on some platforms, for whatever reason.
engine.register_action('gcc.archive',
'''"$(.AR)" $(AROPTIONS) rc "$(<)" "$(>)"
"$(.RANLIB)" "$(<)"
''',
function=gcc_archive,
flags=['piecemeal'])
def gcc_link_dll(targets, sources, properties):
engine = get_manager().engine()
engine.set_target_variable(targets, 'SPACE', ' ')
engine.set_target_variable(targets, 'JAM_SEMAPHORE', '<s>gcc-link-semaphore')
engine.set_target_variable(targets, "HAVE_SONAME", HAVE_SONAME)
engine.set_target_variable(targets, "SONAME_OPTION", SONAME_OPTION)
engine.register_action(
'gcc.link.dll',
# Differ from 'link' above only by -shared.
'"$(CONFIG_COMMAND)" -L"$(LINKPATH)" ' +
'-Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" ' +
'"$(.IMPLIB-COMMAND)$(<[1])" -o "$(<[-1])" ' +
'$(HAVE_SONAME)-Wl,$(SONAME_OPTION)$(SPACE)-Wl,$(<[-1]:D=) ' +
'-shared $(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) ' +
'-l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) ' +
'$(OPTIONS) $(USER_OPTIONS)',
function = gcc_link_dll,
bound_list=['LIBRARIES'])
# Set up threading support. It's somewhat contrived, so perform it at the end,
# to avoid cluttering other code.
if on_windows():
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-mthreads'])
elif bjam.variable('UNIX'):
jamuname = bjam.variable('JAMUNAME')
host_os_name = jamuname[0]
if host_os_name.startswith('SunOS'):
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthreads'])
flags('gcc', 'FINDLIBS-SA', [], ['rt'])
elif host_os_name == 'BeOS':
# BeOS has no threading options, don't set anything here.
pass
elif host_os_name == 'Haiku':
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-lroot'])
# there is no -lrt on Haiku, and -pthread is implicit
elif host_os_name.endswith('BSD'):
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
# there is no -lrt on BSD
elif host_os_name == 'DragonFly':
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
        # there is no -lrt on BSD - DragonFly is a FreeBSD variant,
        # which annoyingly doesn't say it's a *BSD.
elif host_os_name == 'IRIX':
# gcc on IRIX does not support multi-threading, don't set anything here.
pass
elif host_os_name == 'Darwin':
# Darwin has no threading options, don't set anything here.
pass
else:
flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread'])
flags('gcc', 'FINDLIBS-SA', [], ['rt'])
def cpu_flags(toolset, variable, architecture, instruction_set, values, default=None):
#FIXME: for some reason this fails. Probably out of date feature code
## if default:
## flags(toolset, variable,
## ['<architecture>' + architecture + '/<instruction-set>'],
## values)
flags(toolset, variable,
#FIXME: same as above
[##'<architecture>/<instruction-set>' + instruction_set,
'<architecture>' + architecture + '/<instruction-set>' + instruction_set],
values)
# Set architecture/instruction-set options.
#
# x86 and compatible
flags('gcc', 'OPTIONS', ['<architecture>x86/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>x86/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'native', ['-march=native'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i486', ['-march=i486'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i586', ['-march=i586'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'i686', ['-march=i686'], default=True)
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium', ['-march=pentium'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium-mmx', ['-march=pentium-mmx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentiumpro', ['-march=pentiumpro'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium2', ['-march=pentium2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium3', ['-march=pentium3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium3m', ['-march=pentium3m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium-m', ['-march=pentium-m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium4', ['-march=pentium4'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium4m', ['-march=pentium4m'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'prescott', ['-march=prescott'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'nocona', ['-march=nocona'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'core2', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'conroe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'conroe-xe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'conroe-l', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'allendale', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'wolfdale', ['-march=core2', '-msse4.1'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'merom', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'merom-xe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'kentsfield', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'kentsfield-xe', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'yorksfield', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'penryn', ['-march=core2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'corei7', ['-march=corei7'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'nehalem', ['-march=corei7'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'corei7-avx', ['-march=corei7-avx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'sandy-bridge', ['-march=corei7-avx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'core-avx-i', ['-march=core-avx-i'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'ivy-bridge', ['-march=core-avx-i'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'haswell', ['-march=core-avx-i', '-mavx2', '-mfma', '-mbmi', '-mbmi2', '-mlzcnt'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6', ['-march=k6'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6-2', ['-march=k6-2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k6-3', ['-march=k6-3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon', ['-march=athlon'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-tbird', ['-march=athlon-tbird'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-4', ['-march=athlon-4'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-xp', ['-march=athlon-xp'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-mp', ['-march=athlon-mp'])
##
cpu_flags('gcc', 'OPTIONS', 'x86', 'k8', ['-march=k8'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'opteron', ['-march=opteron'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon64', ['-march=athlon64'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-fx', ['-march=athlon-fx'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'k8-sse3', ['-march=k8-sse3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'opteron-sse3', ['-march=opteron-sse3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon64-sse3', ['-march=athlon64-sse3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'amdfam10', ['-march=amdfam10'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'barcelona', ['-march=barcelona'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'bdver1', ['-march=bdver1'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'bdver2', ['-march=bdver2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'bdver3', ['-march=bdver3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'btver1', ['-march=btver1'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'btver2', ['-march=btver2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'winchip-c6', ['-march=winchip-c6'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'winchip2', ['-march=winchip2'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'c3', ['-march=c3'])
cpu_flags('gcc', 'OPTIONS', 'x86', 'c3-2', ['-march=c3-2'])
##
cpu_flags('gcc', 'OPTIONS', 'x86', 'atom', ['-march=atom'])
# Sparc
flags('gcc', 'OPTIONS', ['<architecture>sparc/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>sparc/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v7', ['-mcpu=v7'], default=True)
cpu_flags('gcc', 'OPTIONS', 'sparc', 'cypress', ['-mcpu=cypress'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v8', ['-mcpu=v8'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'supersparc', ['-mcpu=supersparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclite', ['-mcpu=sparclite'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'hypersparc', ['-mcpu=hypersparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclite86x', ['-mcpu=sparclite86x'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'f930', ['-mcpu=f930'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'f934', ['-mcpu=f934'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclet', ['-mcpu=sparclet'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'tsc701', ['-mcpu=tsc701'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'v9', ['-mcpu=v9'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'ultrasparc', ['-mcpu=ultrasparc'])
cpu_flags('gcc', 'OPTIONS', 'sparc', 'ultrasparc3', ['-mcpu=ultrasparc3'])
# RS/6000 & PowerPC
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>32'], ['-m32'])
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>64'], ['-m64'])
cpu_flags('gcc', 'OPTIONS', 'power', '403', ['-mcpu=403'])
cpu_flags('gcc', 'OPTIONS', 'power', '505', ['-mcpu=505'])
cpu_flags('gcc', 'OPTIONS', 'power', '601', ['-mcpu=601'])
cpu_flags('gcc', 'OPTIONS', 'power', '602', ['-mcpu=602'])
cpu_flags('gcc', 'OPTIONS', 'power', '603', ['-mcpu=603'])
cpu_flags('gcc', 'OPTIONS', 'power', '603e', ['-mcpu=603e'])
cpu_flags('gcc', 'OPTIONS', 'power', '604', ['-mcpu=604'])
cpu_flags('gcc', 'OPTIONS', 'power', '604e', ['-mcpu=604e'])
cpu_flags('gcc', 'OPTIONS', 'power', '620', ['-mcpu=620'])
cpu_flags('gcc', 'OPTIONS', 'power', '630', ['-mcpu=630'])
cpu_flags('gcc', 'OPTIONS', 'power', '740', ['-mcpu=740'])
cpu_flags('gcc', 'OPTIONS', 'power', '7400', ['-mcpu=7400'])
cpu_flags('gcc', 'OPTIONS', 'power', '7450', ['-mcpu=7450'])
cpu_flags('gcc', 'OPTIONS', 'power', '750', ['-mcpu=750'])
cpu_flags('gcc', 'OPTIONS', 'power', '801', ['-mcpu=801'])
cpu_flags('gcc', 'OPTIONS', 'power', '821', ['-mcpu=821'])
cpu_flags('gcc', 'OPTIONS', 'power', '823', ['-mcpu=823'])
cpu_flags('gcc', 'OPTIONS', 'power', '860', ['-mcpu=860'])
cpu_flags('gcc', 'OPTIONS', 'power', '970', ['-mcpu=970'])
cpu_flags('gcc', 'OPTIONS', 'power', '8540', ['-mcpu=8540'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power', ['-mcpu=power'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power2', ['-mcpu=power2'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power3', ['-mcpu=power3'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power4', ['-mcpu=power4'])
cpu_flags('gcc', 'OPTIONS', 'power', 'power5', ['-mcpu=power5'])
cpu_flags('gcc', 'OPTIONS', 'power', 'powerpc', ['-mcpu=powerpc'])
cpu_flags('gcc', 'OPTIONS', 'power', 'powerpc64', ['-mcpu=powerpc64'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios', ['-mcpu=rios'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios1', ['-mcpu=rios1'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rios2', ['-mcpu=rios2'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rsc', ['-mcpu=rsc'])
cpu_flags('gcc', 'OPTIONS', 'power', 'rs64a', ['-mcpu=rs64'])
# AIX variant of RS/6000 & PowerPC
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>32/<target-os>aix'], ['-maix32'])
flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>64/<target-os>aix'], ['-maix64'])
flags('gcc', 'AROPTIONS', ['<architecture>power/<address-model>64/<target-os>aix'], ['-X64'])
| gpl-3.0 |
ferriman/SSandSP | pyxel-test/venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py | 28 | 108277 | # coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import itertools
import inspect
import ntpath
import posixpath
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
try:
FileExistsError
except NameError:
FileExistsError = OSError
from pip._vendor import six
from pip._vendor.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from . import py31compat
from pip._vendor import appdirs
from pip._vendor import packaging
__import__('pip._vendor.packaging.version')
__import__('pip._vendor.packaging.specifiers')
__import__('pip._vendor.packaging.requirements')
__import__('pip._vendor.packaging.markers')
__metaclass__ = type
if (3, 0) < sys.version_info < (3, 5):
raise RuntimeError("Python 3.5 or later is required")
if six.PY2:
# Those builtin exceptions are only defined in Python 3
PermissionError = None
NotADirectoryError = None
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
def parse_version(v):
try:
return packaging.version.Version(v)
except packaging.version.InvalidVersion:
return packaging.version.LegacyVersion(v)
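# Hedged usage sketch (not part of the original module): parse_version gives
# PEP 440 aware ordering, falling back to LegacyVersion for non-conforming
# strings, so "1.10" sorts after "1.2" even though a plain string comparison
# would say otherwise.
def _example_parse_version_ordering():
    assert parse_version("1.2") < parse_version("1.10")
    assert parse_version("1.0") < parse_version("1.0.post1")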
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Warnings
'PkgResourcesDeprecationWarning',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = '{}.{}'.format(*sys.version_info)
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
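# Hedged usage sketch (not part of the original module): a PEP 302 loader type
# can be mapped to a provider by passing the provider class itself as the
# factory; the factory is simply called with the module object. MyLoader and
# MyProvider are made-up names used only for illustration.
def _example_register_custom_loader():
    class MyLoader(object):          # hypothetical loader type
        pass
    class MyProvider(NullProvider):  # hypothetical provider built on NullProvider
        pass
    register_loader_type(MyLoader, MyProvider)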
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
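# Hedged usage sketch (not part of the original module): get_provider accepts a
# module name (or a Requirement) and returns an IResourceProvider for it. The
# 'email' package is only an example of something importable on most systems.
def _example_get_provider():
    provider = get_provider('email')
    # For a normal filesystem package this is typically True.
    return provider.has_resource('__init__.py')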
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
from sysconfig import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (
int(version[0]), int(version[1]),
_macosx_arch(machine),
)
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
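# Illustrative sketch (editor's note): compatible_platforms() accepts a provided
# macOS egg when the major version and machine match and the required minor
# version is at least the provided one.
#
#   compatible_platforms('macosx-10.9-x86_64', 'macosx-10.12-x86_64')   # True
#   compatible_platforms('macosx-10.12-x86_64', 'macosx-10.9-x86_64')   # False
#   compatible_platforms(None, 'linux-x86_64')                          # True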
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
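# Illustrative sketch (editor's note): the helpers above delegate to the
# Distribution object for a project. The distribution, group, and entry-point
# names below are hypothetical and assume such a distribution is installed.
#
#   main = load_entry_point('mytool', 'console_scripts', 'mytool')   # callable
#   ep_map = get_entry_map('mytool', 'console_scripts')              # name -> EntryPoint
#   ep = get_entry_info('mytool', 'console_scripts', 'mytool')       # EntryPoint or None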
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet:
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
return (
entry
for dist in self
for entry in dist.get_entry_map(group).values()
if name is None or name == entry.name
)
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False, extras=None):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if any requirements are found on the path that have the correct name
        but the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
`extras` is a list of the extras to be used with these requirements.
This is important because extra requirements may look like `my_req;
extra = "my_extra"`, which would otherwise be interpreted as a purely
optional requirement. Instead, we want to be able to assert that these
requirements are truly required.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req, extras):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(
req, ws, installer,
replace_conflicting=replace_conflicting
)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(
self, plugin_env, full_env=None, installer=None, fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
If `existing=True` (default),
call on all existing ones, as well.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
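# Illustrative sketch (editor's note): a WorkingSet snapshots the distributions
# on a path list (sys.path by default); require() activates requirements and
# iter_entry_points() walks advertised entry points. The requirement string and
# group name are hypothetical.
#
#   ws = WorkingSet()                        # defaults to sys.path
#   ws.require('mypackage>=1.0')             # hypothetical requirement
#   for ep in ws.iter_entry_points('mygroup'):
#       print(ep.name, '->', ep.module_name)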
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req, extras=None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals)
class Environment:
"""Searchable snapshot of distributions on a search path"""
def __init__(
self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.6'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
return py_compat and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(
self, req, working_set, installer=None, replace_conflicting=False):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
try:
dist = working_set.find(req)
except VersionConflict:
if not replace_conflicting:
raise
dist = None
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
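# Illustrative sketch (editor's note): an Environment indexes distributions found
# on a search path by project key; best_match() picks the newest distribution
# that satisfies a Requirement and is usable with a working set. The requirement
# below is hypothetical; `working_set` is the module-level global working set.
#
#   env = Environment()                            # scans sys.path by default
#   req = Requirement.parse('mypackage>=1.0')      # hypothetical requirement
#   dist = env.best_match(req, working_set)        # Distribution or None
#   newest_first = env['mypackage']                # all candidate versions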
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent("""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s)
to the Python egg cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory?
You can change the cache directory by setting the PYTHON_EGG_CACHE
environment variable to point to an accessible directory.
""").lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = (
"%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path
)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return (
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
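# Illustrative sketch (editor's note): a ResourceManager mediates access to
# package resources, extracting zipped files into the egg cache
# (PYTHON_EGG_CACHE or a per-user cache dir) on demand. Package and resource
# names are hypothetical.
#
#   manager = ResourceManager()
#   exists = manager.resource_exists('mypackage', 'data/config.json')  # bool
#   data = manager.resource_string('mypackage', 'data/config.json')    # contents
#   path = manager.resource_filename('mypackage', 'data/config.json')  # real path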
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
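# Illustrative sketch (editor's note): example inputs for the normalizers above;
# the expected results in the comments follow from the regexes and PEP 440
# normalization.
#
#   safe_name('My Cool Project')      # -> 'My-Cool-Project'
#   safe_version('2.1-rc1')           # -> '2.1rc1'
#   safe_extra('Feature One')         # -> 'feature_one'
#   to_filename('my-project')         # -> 'my_project'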
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
if invalid or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
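# Illustrative sketch (editor's note): markers are evaluated against the current
# interpreter environment via packaging.markers.
#
#   evaluate_marker('python_version >= "2.7"')   # True on any supported Python
#   invalid_marker('python_version >= "2.7"')    # False (marker is valid)
#   invalid_marker('not a marker')               # returns a SyntaxError instance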
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def _get_metadata_path(self, name):
return self._fn(self.egg_info, name)
def has_metadata(self, name):
if not self.egg_info:
return self.egg_info
path = self._get_metadata_path(name)
return self._has(path)
def get_metadata(self, name):
if not self.egg_info:
return ""
path = self._get_metadata_path(name)
value = self._get(path)
if six.PY2:
return value
try:
return value.decode('utf-8')
except UnicodeDecodeError as exc:
# Include the path in the error message to simplify
# troubleshooting, and without changing the exception type.
exc.reason += ' in {} file at path: {}'.format(name, path)
raise
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError(
"Script {script!r} not found in metadata at {self.egg_info!r}"
.format(**locals()),
)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
self._validate_resource_path(resource_name)
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
@staticmethod
def _validate_resource_path(path):
"""
Validate the resource paths according to the docs.
https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
>>> warned = getfixture('recwarn')
>>> warnings.simplefilter('always')
>>> vrp = NullProvider._validate_resource_path
>>> vrp('foo/bar.txt')
>>> bool(warned)
False
>>> vrp('../foo/bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('/foo/bar.txt')
>>> bool(warned)
True
>>> vrp('foo/../../bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('foo/f../bar.txt')
>>> bool(warned)
False
Windows path separators are straight-up disallowed.
>>> vrp(r'\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
>>> vrp(r'C:\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
Blank values are allowed
>>> vrp('')
>>> bool(warned)
False
Non-string values are not.
>>> vrp(None)
Traceback (most recent call last):
...
AttributeError: ...
"""
invalid = (
os.path.pardir in path.split(posixpath.sep) or
posixpath.isabs(path) or
ntpath.isabs(path)
)
if not invalid:
return
msg = "Use of .. or absolute path in a resource path is not allowed."
# Aggressively disallow Windows absolute paths
if ntpath.isabs(path) and not posixpath.isabs(path):
raise ValueError(msg)
# for compatibility, warn; in future
# raise ValueError(msg)
warnings.warn(
msg[:-1] + " and will raise exceptions in a future release.",
DeprecationWarning,
stacklevel=4,
)
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path != old:
if _is_egg_path(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
for name in loader_names:
loader_cls = getattr(importlib_machinery, name, type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
module_path = None
_isdir = _has = lambda self, path: False
def _get(self, path):
return ''
def _listdir(self, path):
return []
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with zipfile.ZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
fspath = fspath.rstrip(os.sep)
if fspath == self.loader.archive:
return ''
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(
".$extract",
dir=os.path.dirname(real_path),
)
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def _get_metadata_path(self, name):
return self.path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
# Python 2.7 compat for: replacement_char = '�'
replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
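# Illustrative sketch (editor's note): find_distributions() dispatches on the
# importer handling a sys.path entry; for an ordinary directory the registered
# find_on_path finder is used. The directory path below is hypothetical.
#
#   for dist in find_distributions('/path/to/site-packages'):
#       print(dist)   # e.g. "mypackage 1.0"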
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir(''):
if _is_egg_path(subitem):
subpath = os.path.join(path_item, subitem)
dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
for dist in dists:
yield dist
elif subitem.lower().endswith('.dist-info'):
subpath = os.path.join(path_item, subitem)
submeta = EggMetadata(zipimport.zipimporter(subpath))
submeta.egg_info = subpath
yield Distribution.from_location(path_item, subitem, submeta)
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
)
)
return
entries = safe_listdir(path_item)
# for performance, before sorting by version,
# screen entries for only those that will yield
# distributions
filtered = (
entry
for entry in entries
if dist_factory(path_item, entry, only)
)
# scan for .egg and .egg-info in directory
path_item_entries = _by_version_descending(filtered)
for entry in path_item_entries:
fullpath = os.path.join(path_item, entry)
factory = dist_factory(path_item, entry, only)
for dist in factory(fullpath):
yield dist
def dist_factory(path_item, entry, only):
"""
Return a dist_factory for a path_item and entry
"""
lower = entry.lower()
is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
return (
distributions_from_metadata
if is_meta else
find_distributions
if not only and _is_egg_path(entry) else
resolve_egg_link
if not only and lower.endswith('.egg-link') else
NoDists()
)
class NoDists:
"""
>>> bool(NoDists())
False
>>> list(NoDists()('anything'))
[]
"""
def __bool__(self):
return False
if six.PY2:
__nonzero__ = __bool__
def __call__(self, fullpath):
return iter(())
def safe_listdir(path):
"""
Attempt to list contents of path, but suppress some exceptions.
"""
try:
return os.listdir(path)
except (PermissionError, NotADirectoryError):
pass
except OSError as e:
        # Ignore the directory if it does not exist, is not a directory,
        # or permission is denied
ignorable = (
e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
# Python 2 on Windows needs to be handled this way :(
or getattr(e, "winerror", None) == 267
)
if not ignorable:
raise
return ()
def distributions_from_metadata(path):
root = os.path.dirname(path)
if os.path.isdir(path):
if len(os.listdir(path)) == 0:
# empty metadata dir; skip
return
metadata = PathMetadata(root, path)
else:
metadata = FileMetadata(path)
entry = os.path.basename(path)
yield Distribution.from_location(
root, entry, metadata, precedence=DEVELOP_DIST,
)
def non_empty_lines(path):
"""
Yield non-empty lines from file at path
"""
with open(path) as f:
for line in f:
line = line.strip()
if line:
yield line
def resolve_egg_link(path):
"""
Given a path to an .egg-link, resolve distributions
present in the referenced path.
"""
referenced_paths = non_empty_lines(path)
resolved_paths = (
os.path.join(os.path.dirname(path), ref)
for ref in referenced_paths
)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ())
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
# capture warnings due to #1111
with warnings.catch_warnings():
warnings.simplefilter("ignore")
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
if isinstance(module.__path__, list):
module.__path__[:] = new_path
else:
module.__path__ = new_path
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path = sys.path
parent, _, _ = packageName.rpartition('.')
if parent:
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent or None, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
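# Illustrative sketch (editor's note): the classic pkg_resources-style namespace
# package declares itself from the package's own __init__.py, letting portions
# of the same namespace live on different sys.path entries.
#
#   # mynamespace/__init__.py (hypothetical)
#   __import__('pkg_resources').declare_namespace(__name__)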
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
def _cygwin_patch(filename): # pragma: nocover
"""
Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
symlink components. Using
os.path.abspath() works around this limitation. A fix in os.getcwd()
    would probably be better, in Cygwin even more so, except
that this seems to be by design...
"""
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_egg_path(path):
"""
Determine if given path appears to be an egg.
"""
return path.lower().endswith('.egg')
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
_is_egg_path(path) and
os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
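# Illustrative sketch (editor's note): EGG_NAME splits an egg basename (without
# the .egg extension) into its optional components.
#
#   m = EGG_NAME('MyProject-1.2-py2.7-linux_x86_64')   # hypothetical basename
#   m.group('name'), m.group('ver'), m.group('pyver'), m.group('plat')
#   # -> ('MyProject', '1.2', '2.7', 'linux_x86_64')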
class EntryPoint:
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = tuple(extras)
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
PkgResourcesDeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
# Get the requirements for this entry point with all its extras and
# then resolve them. We have to pass `extras` along when resolving so
# that the working set knows what extras we want. Otherwise, for
# dist-info distributions, the working set will assume that the
# requirements for that extra are purely optional and skip over them.
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer, extras=self.extras)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
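# Hedged usage sketch (not part of pkg_resources): "example.pkg.cli" and the
# extra name are hypothetical; this only shows the parse/str round trip.
def _example_entry_point_parse():  # pragma: no cover - documentation aid
    ep = EntryPoint.parse('console = example.pkg.cli:main [extra1]')
    assert (ep.name, ep.module_name) == ('console', 'example.pkg.cli')
    assert ep.attrs == ('main',) and ep.extras == ('extra1',)
    # __str__ reproduces the "name = module:attrs [extras]" form.
    assert str(ep) == 'console = example.pkg.cli:main [extra1]'
    # parse_group builds a dict keyed by entry point name.
    group = EntryPoint.parse_group('console_scripts', [str(ep)])
    assert list(group) == ['console']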
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
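# Hedged usage sketch (not part of pkg_resources): the URLs are invented; the
# helper only strips a trailing "#md5=..." fragment and leaves others alone.
def _example_remove_md5_fragment():  # pragma: no cover - documentation aid
    url = 'http://example.com/SamplePkg-1.0.tar.gz#md5=abc123'
    assert _remove_md5_fragment(url) == 'http://example.com/SamplePkg-1.0.tar.gz'
    other = 'http://example.com/SamplePkg-1.0.tar.gz#egg=SamplePkg'
    assert _remove_md5_fragment(other) == other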
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
def is_version_line(line):
return line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
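# Hedged usage sketch (not part of pkg_resources): the metadata lines are
# fabricated; only a "Version:" header (case-insensitive) is consulted.
def _example_version_from_file():  # pragma: no cover - documentation aid
    lines = ['Metadata-Version: 2.1', 'Name: SamplePkg', 'Version: 1.2.3']
    assert _version_from_file(lines) == '1.2.3'
    # With no Version header the helper returns None.
    assert _version_from_file(['Name: SamplePkg']) is None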
class Distribution:
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(
self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = self._get_version()
if version is None:
path = self._get_metadata_path_for_display(self.PKG_INFO)
msg = (
"Missing 'Version:' header and/or {} file at path: {}"
).format(self.PKG_INFO, path)
raise ValueError(msg, self)
return version
@property
def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
@staticmethod
def _filter_extras(dm):
"""
Given a mapping of extras to dependencies, strip off
environment markers and filter out any dependencies
not matching the markers.
"""
for extra in list(filter(None, dm)):
new_extra = extra
reqs = dm.pop(extra)
new_extra, _, marker = extra.partition(':')
fails_marker = marker and (
invalid_marker(marker)
or not evaluate_marker(marker)
)
if fails_marker:
reqs = []
new_extra = safe_extra(new_extra) or None
dm.setdefault(new_extra, []).extend(reqs)
return dm
def _build_dep_map(self):
dm = {}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata_path_for_display(self, name):
"""
Return the path to the given metadata file, if available.
"""
try:
# We need to access _get_metadata_path() on the provider object
# directly rather than through this class's __getattr__()
# since _get_metadata_path() is marked private.
path = self._provider._get_metadata_path(name)
# Handle exceptions e.g. in case the distribution's metadata
# provider doesn't support _get_metadata_path().
except Exception:
return '[could not detect]'
return path
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
version = _version_from_file(lines)
return version
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
def __dir__(self):
return list(
set(super(Distribution, self).__dir__())
| set(
attr for attr in self._provider.__dir__()
if not attr.startswith('_')
)
)
if not hasattr(object, '__dir__'):
        # Python 2.7's object has no __dir__, so the override above cannot delegate to it
del __dir__
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if
# found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
                # remove the duplicate and keep scanning from its position
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
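# Hedged usage sketch (not part of pkg_resources): the location and egg
# basename are invented; with no metadata, the name and version are parsed
# from the filename via EGG_NAME.
def _example_distribution_from_location():  # pragma: no cover - documentation aid
    dist = Distribution.from_location('/tmp', 'SamplePkg-1.2-py2.7.egg')
    assert (dist.project_name, dist.version) == ('SamplePkg', '1.2')
    assert dist.egg_name() == 'SamplePkg-1.2-py2.7'
    # as_requirement() pins the exact parsed version.
    assert str(dist.as_requirement()) == 'SamplePkg==1.2'
    # insert_on() appends the location to a plain path list (no conflict
    # checks happen unless the list is sys.path itself).
    path = ['/somewhere/else']
    dist.insert_on(path, replace=False)
    assert path[-1] == '/tmp'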
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
Packages installed by distutils (e.g. numpy or scipy),
which uses an old safe_version, and so
their version numbers can get mangled when
converted to filenames (e.g., 1.11.0.dev0+2329eae to
1.11.0.dev0_2329eae). These distributions will not be
parsed properly
downstream by Distribution and safe_version, so
take an extra step and try to get the version number from
the metadata file itself instead of the filename.
"""
md_version = self._get_version()
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""
Wrap an actual or potential sys.path entry
w/metadata, .dist-info style.
"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
# If there is a line continuation, drop it, and append the next line.
if line.endswith('\\'):
line = line[:-2].strip()
try:
line += next(lines)
except StopIteration:
return
yield Requirement(line)
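# Hedged usage sketch (not part of pkg_resources): the project names are
# invented; shows the " #" comment stripping and " \" line continuation.
def _example_parse_requirements():  # pragma: no cover - documentation aid
    reqs = list(parse_requirements(
        'SamplePkg>=1.0  # trailing comment\n'
        'OtherPkg[extra1]== \\\n'
        '2.0\n'
    ))
    assert [r.project_name for r in reqs] == ['SamplePkg', 'OtherPkg']
    assert reqs[1].specs == [('==', '2.0')] and reqs[1].extras == ('extra1',)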
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.url,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self):
return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
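# Hedged usage sketch (not part of pkg_resources): the requirement string is
# invented; __contains__ accepts a Distribution or a bare version string.
def _example_requirement_contains():  # pragma: no cover - documentation aid
    req = Requirement.parse('SamplePkg[extra1]>=1.0,<2.0')
    assert (req.project_name, req.key) == ('SamplePkg', 'samplepkg')
    assert req.extras == ('extra1',)
    # Version strings are checked against the specifier set (prereleases
    # are always allowed, matching the __contains__ comment above).
    assert '1.5' in req and '2.1' not in req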
def _always_object(classes):
"""
Ensure object appears in the mro even
for old-style classes.
"""
if object not in classes:
return classes + (object,)
return classes
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
for t in types:
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
py31compat.makedirs(dirname, exist_ok=True)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
try:
mkdir(dirname, 0o755)
except FileExistsError:
pass
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
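# Hedged usage sketch (not part of pkg_resources): the text mimics an
# entry_points.txt-style file with one unsectioned leading line.
def _example_split_sections():  # pragma: no cover - documentation aid
    text = (
        'top-level line\n'
        '[console_scripts]\n'
        'tool = example.pkg.cli:main\n'
        '\n'
        '[gui_scripts]\n'
    )
    assert list(split_sections(text)) == [
        (None, ['top-level line']),
        ('console_scripts', ['tool = example.pkg.cli:main']),
        ('gui_scripts', []),
    ]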
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
g.update(
(name, getattr(manager, name))
for name in dir(manager)
if not name.startswith('_')
)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
tuple(
dist.activate(replace=False)
for dist in working_set
)
add_activation_listener(
lambda dist: dist.activate(replace=True),
existing=False,
)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
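# Hedged usage sketch (not part of pkg_resources): "example.plugins" is a
# hypothetical entry point group; after module initialization the master
# working set exposes require()/iter_entry_points() as module-level globals.
def _example_iterate_plugins():  # pragma: no cover - documentation aid
    for ep in iter_entry_points(group='example.plugins'):
        plugin = ep.load()  # requires the owning distribution, then resolves
        plugin()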
class PkgResourcesDeprecationWarning(Warning):
"""
Base class for warning about deprecations in ``pkg_resources``
This class is not derived from ``DeprecationWarning``, and as such is
visible by default.
"""
| gpl-3.0 |